From 84f9c875571a5c3935397c00b107f8d4d88a0a45 Mon Sep 17 00:00:00 2001 From: Sergey Shepelev Date: Sun, 22 Jan 2023 07:10:35 +0300 Subject: [PATCH 01/35] dep: greenlet>=1.0 removing unused clear_sys_exc_info stub fixes https://github.com/eventlet/eventlet/issues/763 --- eventlet/hubs/hub.py | 6 +----- eventlet/hubs/kqueue.py | 1 - eventlet/hubs/poll.py | 1 - eventlet/hubs/selects.py | 1 - eventlet/support/__init__.py | 9 --------- eventlet/support/greenlets.py | 4 ---- setup.py | 2 +- tests/hub_test.py | 1 - tests/test__refcount.py | 1 - 9 files changed, 2 insertions(+), 24 deletions(-) diff --git a/eventlet/hubs/hub.py b/eventlet/hubs/hub.py index db55958544..c27b81f709 100644 --- a/eventlet/hubs/hub.py +++ b/eventlet/hubs/hub.py @@ -21,7 +21,7 @@ def alarm_signal(seconds): import eventlet.hubs from eventlet.hubs import timer -from eventlet.support import greenlets as greenlet, clear_sys_exc_info +from eventlet.support import greenlets as greenlet try: from monotonic import monotonic except ImportError: @@ -309,7 +309,6 @@ def switch(self): cur.parent = self.greenlet except ValueError: pass # gets raised if there is a greenlet parent cycle - clear_sys_exc_info() return self.greenlet.switch() def squelch_exception(self, fileno, exc_info): @@ -397,13 +396,11 @@ def squelch_generic_exception(self, exc_info): if self.debug_exceptions: traceback.print_exception(*exc_info) sys.stderr.flush() - clear_sys_exc_info() def squelch_timer_exception(self, timer, exc_info): if self.debug_exceptions: traceback.print_exception(*exc_info) sys.stderr.flush() - clear_sys_exc_info() def add_timer(self, timer): scheduled_time = self.clock() + timer.seconds @@ -478,7 +475,6 @@ def fire_timers(self, when): raise except: self.squelch_timer_exception(timer, sys.exc_info()) - clear_sys_exc_info() # for debugging: diff --git a/eventlet/hubs/kqueue.py b/eventlet/hubs/kqueue.py index bad4a87b85..8438805c5a 100644 --- a/eventlet/hubs/kqueue.py +++ b/eventlet/hubs/kqueue.py @@ -109,4 +109,3 @@ 
def wait(self, seconds=None): raise except: self.squelch_exception(fileno, sys.exc_info()) - support.clear_sys_exc_info() diff --git a/eventlet/hubs/poll.py b/eventlet/hubs/poll.py index 1bbd4019c9..d3f9c6a3a6 100644 --- a/eventlet/hubs/poll.py +++ b/eventlet/hubs/poll.py @@ -113,7 +113,6 @@ def wait(self, seconds=None): raise except: self.squelch_exception(fileno, sys.exc_info()) - support.clear_sys_exc_info() if self.debug_blocking: self.block_detect_post() diff --git a/eventlet/hubs/selects.py b/eventlet/hubs/selects.py index 0ead5b8935..0386a1ed2f 100644 --- a/eventlet/hubs/selects.py +++ b/eventlet/hubs/selects.py @@ -61,4 +61,3 @@ def wait(self, seconds=None): raise except: self.squelch_exception(fileno, sys.exc_info()) - support.clear_sys_exc_info() diff --git a/eventlet/support/__init__.py b/eventlet/support/__init__.py index 43bac91a4d..b1c160715d 100644 --- a/eventlet/support/__init__.py +++ b/eventlet/support/__init__.py @@ -30,15 +30,6 @@ def get_errno(exc): return None -if sys.version_info[0] < 3 and not greenlets.preserves_excinfo: - from sys import exc_clear as clear_sys_exc_info -else: - def clear_sys_exc_info(): - """No-op In py3k. - Exception information is not visible outside of except statements. 
- sys.exc_clear became obsolete and removed.""" - pass - if sys.version_info[0] < 3: def bytes_to_str(b, encoding='ascii'): return b diff --git a/eventlet/support/greenlets.py b/eventlet/support/greenlets.py index d4e1793a9b..b93932852a 100644 --- a/eventlet/support/greenlets.py +++ b/eventlet/support/greenlets.py @@ -1,8 +1,4 @@ -import distutils.version - import greenlet getcurrent = greenlet.greenlet.getcurrent GreenletExit = greenlet.greenlet.GreenletExit -preserves_excinfo = (distutils.version.LooseVersion(greenlet.__version__) - >= distutils.version.LooseVersion('0.3.2')) greenlet = greenlet.greenlet diff --git a/setup.py b/setup.py index a8f46845a8..9b927e0a65 100644 --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ packages=setuptools.find_packages(exclude=['benchmarks', 'tests', 'tests.*']), install_requires=( 'dnspython >= 1.15.0', - 'greenlet >= 0.3', + 'greenlet >= 1.0', 'monotonic >= 1.4;python_version<"3.5"', 'six >= 1.10.0', ), diff --git a/tests/hub_test.py b/tests/hub_test.py index 3f403b1ffb..e1e9b84a43 100644 --- a/tests/hub_test.py +++ b/tests/hub_test.py @@ -190,7 +190,6 @@ def fail(): class TestExceptionInGreenthread(tests.LimitedTestCase): - @tests.skip_unless(greenlets.preserves_excinfo) def test_exceptionpreservation(self): # events for controlling execution order gt1event = eventlet.Event() diff --git a/tests/test__refcount.py b/tests/test__refcount.py index 1090a1ff36..5c1c002438 100644 --- a/tests/test__refcount.py +++ b/tests/test__refcount.py @@ -57,7 +57,6 @@ def run_interaction(run_client): def run_and_check(run_client): w = run_interaction(run_client=run_client) - # clear_sys_exc_info() gc.collect() fd = w() print('run_and_check: weakref fd:', fd) From dd2f0ea32d6b6d064be52fe8e27fca275985cdc4 Mon Sep 17 00:00:00 2001 From: Sergey Shepelev Date: Mon, 27 Mar 2023 22:07:15 +0300 Subject: [PATCH 02/35] tests: getaddrinfo(host, 0) is not supported on OpenIndiana platform https://github.com/eventlet/eventlet/issues/791 --- 
tests/greendns_test.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/greendns_test.py b/tests/greendns_test.py index a05e5827da..51305ab04c 100644 --- a/tests/greendns_test.py +++ b/tests/greendns_test.py @@ -630,14 +630,14 @@ def test_getaddrinfo_inet6(self): def test_getaddrinfo_only_a_ans(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', '1.2.3.4') - res = greendns.getaddrinfo('example.com', 0) + res = greendns.getaddrinfo('example.com', None) addr = [('1.2.3.4', 0)] * len(res) assert addr == [ai[-1] for ai in res] def test_getaddrinfo_only_aaaa_ans(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('example.com', 'dead:beef::1') - res = greendns.getaddrinfo('example.com', 0) + res = greendns.getaddrinfo('example.com', None) addr = [('dead:beef::1', 0, 0, 0)] * len(res) assert addr == [ai[-1] for ai in res] @@ -654,7 +654,7 @@ def clear_raises(res_self): res.raises = greendns.dns.exception.Timeout greendns.resolver._resolver = res() - result = greendns.getaddrinfo('example.com', 0, 0) + result = greendns.getaddrinfo('example.com', None, 0) addr = [('1.2.3.4', 0)] * len(result) assert addr == [ai[-1] for ai in result] @@ -671,7 +671,7 @@ def clear_raises(res_self): res.raises = greendns.dns.exception.DNSException greendns.resolver._resolver = res() - result = greendns.getaddrinfo('example.com', 0, 0) + result = greendns.getaddrinfo('example.com', None, 0) addr = [('1.2.3.4', 0)] * len(result) assert addr == [ai[-1] for ai in result] @@ -691,7 +691,7 @@ def test_getaddrinfo_hosts_only_timeout(self): greendns.resolver._resolver = res() with tests.assert_raises(socket.gaierror): - greendns.getaddrinfo('example.com', 0, 0) + greendns.getaddrinfo('example.com', None, 0) def test_getaddrinfo_hosts_only_dns_error(self): hostsres = _make_mock_base_resolver() @@ -702,13 +702,13 @@ def test_getaddrinfo_hosts_only_dns_error(self): greendns.resolver._resolver = res() with 
tests.assert_raises(socket.gaierror): - greendns.getaddrinfo('example.com', 0, 0) + greendns.getaddrinfo('example.com', None, 0) def test_canonname(self): greendns.resolve = _make_mock_resolve() greendns.resolve.add('host.example.com', '1.2.3.4') greendns.resolve_cname = self._make_mock_resolve_cname() - res = greendns.getaddrinfo('host.example.com', 0, + res = greendns.getaddrinfo('host.example.com', None, 0, 0, 0, socket.AI_CANONNAME) assert res[0][3] == 'cname.example.com' From c606d953de3b1f3368e1c59088e0dec2f842c045 Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Thu, 14 Dec 2023 07:57:57 -0500 Subject: [PATCH 03/35] Drop older Python versions (#827) * Drop support for Python 2.7 to 3.7. --- .github/workflows/test.yaml | 7 ------- NEWS | 5 +++++ README.rst | 2 +- setup.py | 8 +++----- tox.ini | 16 +++++----------- 5 files changed, 14 insertions(+), 24 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 80ba514ea7..319c8fbed9 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -29,12 +29,6 @@ jobs: fail-fast: false matrix: include: - - { py: 2.7, toxenv: py27-epolls, ignore-error: true, os: ubuntu-latest } - - { py: 3.5, toxenv: py35-epolls, ignore-error: true, os: ubuntu-20.04 } - - { py: 3.6, toxenv: py36-epolls, ignore-error: true, os: ubuntu-20.04 } - - { py: 3.7, toxenv: py37-epolls, ignore-error: false, os: ubuntu-latest } - - { py: 3.7, toxenv: py37-poll, ignore-error: false, os: ubuntu-latest } - - { py: 3.7, toxenv: py37-selects, ignore-error: false, os: ubuntu-latest } - { py: 3.8, toxenv: py38-epolls, ignore-error: false, os: ubuntu-latest } - { py: 3.8, toxenv: py38-openssl, ignore-error: false, os: ubuntu-latest } - { py: 3.8, toxenv: py38-poll, ignore-error: false, os: ubuntu-latest } @@ -47,7 +41,6 @@ jobs: - { py: "3.10", toxenv: py310-poll, ignore-error: false, os: ubuntu-latest } - { py: "3.10", toxenv: py310-selects, ignore-error: false, os: ubuntu-latest } - { py: 
"3.10", toxenv: ipv6, ignore-error: false, os: ubuntu-latest } - - { py: pypy2.7, toxenv: pypy2-epolls, ignore-error: true, os: ubuntu-20.04 } - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 } steps: diff --git a/NEWS b/NEWS index a6b5dfd32d..328c4ba486 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,8 @@ +Unreleased +========== + +* Dropped support for Python 3.7 and earlier. + 0.33.3 ====== * dnspython 2.3.0 raised AttributeError: module 'dns.rdtypes' has no attribute 'ANY' https://github.com/eventlet/eventlet/issues/781 diff --git a/README.rst b/README.rst index d5379f2c75..a3e0629802 100644 --- a/README.rst +++ b/README.rst @@ -65,7 +65,7 @@ Apologies for any inconvenience. Supported Python versions ========================= -Currently CPython 2.7 and 3.4+ are supported, but **2.7 and 3.4 support is deprecated and will be removed in the future**, only CPython 3.5+ support will remain. +Python 3.8-3.11 are currently supported. Flair ===== diff --git a/setup.py b/setup.py index 9b927e0a65..f5ed3d4725 100644 --- a/setup.py +++ b/setup.py @@ -13,6 +13,7 @@ author='Linden Lab', author_email='eventletdev@lists.secondlife.com', url='http://eventlet.net', + python_requires=">=3.8.0", project_urls={ 'Source': 'https://github.com/eventlet/eventlet', }, @@ -38,14 +39,11 @@ "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Programming Language :: Python", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules", diff --git a/tox.ini b/tox.ini index 
040d827212..71b86d37c1 100644 --- a/tox.ini +++ b/tox.ini @@ -20,8 +20,8 @@ envlist = pep8 py38-openssl py39-dnspython1 - py{27,35,36,py2,py3}-epolls - py{37,38,39,310}-{selects,poll,epolls} + pypy3-epolls + py{38,39,310}-{selects,poll,epolls} skipsdist = True [testenv:ipv6] @@ -63,16 +63,10 @@ setenv = deps = coverage==4.5.1 nose3==1.3.8 - py27-{selects,poll,epolls}: pyopenssl==19.1.0 - py27: mysqlclient==1.4.6 - py{27,35}: setuptools==38.5.1 - py27: subprocess32==3.2.7 py38-openssl: pyopenssl==20.0.0 - pypy{2,3}: psycopg2cffi-compat==1.1 - py{27,35}-{selects,poll,epolls}: pyzmq==19.0.2 - py{36,37,38,39}-{selects,poll,epolls}: pyzmq==21.0.2 - py{27,35,36,37}: psycopg2-binary==2.7.7 - py{35,36,37,38,39,310,311}: mysqlclient==2.0.3 + pypy3: psycopg2cffi-compat==1.1 + py{38,39}-{selects,poll,epolls}: pyzmq==21.0.2 + py{38,39,310,311}: mysqlclient==2.0.3 py{38,39}: psycopg2-binary==2.8.4 py{310,311}: psycopg2-binary==2.9.5 py{310,311}: pyzmq==25.0.0 From 77cb7f0a64a5c3ee4af661279cd18665a41df25c Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Thu, 14 Dec 2023 09:15:21 -0500 Subject: [PATCH 04/35] Green CI, part 1: Pass tests on Python 3.8 and 3.9 (#831) * Update to newer version * Pass tests on Python 3.8 and 3.9 --------- Co-authored-by: Itamar Turner-Trauring --- tests/wsgi_test.py | 3 +++ tox.ini | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/wsgi_test.py b/tests/wsgi_test.py index 6020b9b2e0..13a6e83c5d 100644 --- a/tests/wsgi_test.py +++ b/tests/wsgi_test.py @@ -565,6 +565,9 @@ def server(sock, site, log): client_socket, addr = sock.accept() serv.process_request([addr, client_socket, wsgi.STATE_IDLE]) return True + except (ssl.SSLZeroReturnError, ssl.SSLEOFError): + # Can't write a response to a closed TLS session + return True except Exception: traceback.print_exc() return False diff --git a/tox.ini b/tox.ini index 71b86d37c1..23fbbf3362 100644 --- a/tox.ini +++ b/tox.ini @@ -63,7 +63,7 @@ setenv = deps = coverage==4.5.1 
nose3==1.3.8 - py38-openssl: pyopenssl==20.0.0 + py38-openssl: pyopenssl==22.1.0 pypy3: psycopg2cffi-compat==1.1 py{38,39}-{selects,poll,epolls}: pyzmq==21.0.2 py{38,39,310,311}: mysqlclient==2.0.3 From abbe7a5d6569b4518fbc68309175d93e9cf9081f Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Thu, 14 Dec 2023 09:57:51 -0500 Subject: [PATCH 05/35] Green CI, part 2: pass tests on 3.10 and 3.11 (#832) * Make RLock upgrading a lot more robust * Run tests on Python 3.11 in CI --------- Co-authored-by: Itamar Turner-Trauring --- .github/workflows/test.yaml | 1 + eventlet/patcher.py | 28 ++++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 319c8fbed9..11949e9b14 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -41,6 +41,7 @@ jobs: - { py: "3.10", toxenv: py310-poll, ignore-error: false, os: ubuntu-latest } - { py: "3.10", toxenv: py310-selects, ignore-error: false, os: ubuntu-latest } - { py: "3.10", toxenv: ipv6, ignore-error: false, os: ubuntu-latest } + - { py: "3.11", toxenv: py311-epolls, ignore-error: false, os: ubuntu-latest } - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 } steps: diff --git a/eventlet/patcher.py b/eventlet/patcher.py index 9c6727d71b..0d1157afcd 100644 --- a/eventlet/patcher.py +++ b/eventlet/patcher.py @@ -411,7 +411,7 @@ def _green_existing_locks(): if not py3_style and isinstance(obj._RLock__block, lock_type): _fix_py2_rlock(obj, tid) elif py3_style and not isinstance(obj, pyrlock_type): - _fix_py3_rlock(obj) + _fix_py3_rlock(obj, tid) def _fix_py2_rlock(rlock, tid): @@ -424,17 +424,41 @@ def _fix_py2_rlock(rlock, tid): rlock._RLock__owner = tid -def _fix_py3_rlock(old): +def _fix_py3_rlock(old, tid): import gc import threading + from eventlet.green.thread import allocate_lock new = threading._PyRLock() + if not hasattr(new, "_block") or not hasattr(new, "_owner"): + # These 
will only fail if Python changes its internal implementation of + # _PyRLock: + raise RuntimeError( + "INTERNAL BUG. Perhaps you are using a major version " + + "of Python that is unsupported by eventlet? Please file a bug " + + "at https://github.com/eventlet/eventlet/issues/new") + new._block = allocate_lock() + acquired = False while old._is_owned(): old.release() new.acquire() + acquired = True if old._is_owned(): new.acquire() + acquired = True + if acquired: + new._owner = tid gc.collect() for ref in gc.get_referrers(old): + if isinstance(ref, dict): + for k, v in list(ref.items()): + if v is old: + ref[k] = new + continue + if isinstance(ref, list): + for i, v in enumerate(ref): + if v is old: + ref[i] = new + continue try: ref_vars = vars(ref) except TypeError: From 97dc606bffaa53030dd540fe0b0b291b9a1e147b Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Thu, 14 Dec 2023 10:00:33 -0500 Subject: [PATCH 06/35] tests: Drop unmaintained and unused stdlib tests (#820) Co-authored-by: Itamar Turner-Trauring --- doc/testing.rst | 15 ------- tests/stdlib/all.py | 50 ---------------------- tests/stdlib/all_modules.py | 40 ----------------- tests/stdlib/all_monkey.py | 26 ----------- tests/stdlib/test_SimpleHTTPServer.py | 10 ----- tests/stdlib/test_asynchat.py | 20 --------- tests/stdlib/test_asyncore.py | 54 ----------------------- tests/stdlib/test_ftplib.py | 10 ----- tests/stdlib/test_httplib.py | 12 ------ tests/stdlib/test_httpservers.py | 21 --------- tests/stdlib/test_os.py | 10 ----- tests/stdlib/test_queue.py | 14 ------ tests/stdlib/test_select.py | 11 ----- tests/stdlib/test_socket.py | 23 ---------- tests/stdlib/test_socket_ssl.py | 41 ------------------ tests/stdlib/test_socketserver.py | 24 ----------- tests/stdlib/test_ssl.py | 57 ------------------------- tests/stdlib/test_subprocess.py | 12 ------ tests/stdlib/test_thread.py | 15 ------- tests/stdlib/test_thread__boundedsem.py | 20 --------- tests/stdlib/test_threading.py | 46 
-------------------- tests/stdlib/test_threading_local.py | 18 -------- tests/stdlib/test_timeout.py | 16 ------- tests/stdlib/test_urllib.py | 12 ------ tests/stdlib/test_urllib2.py | 18 -------- tests/stdlib/test_urllib2_localnet.py | 17 -------- 26 files changed, 612 deletions(-) delete mode 100644 tests/stdlib/all.py delete mode 100644 tests/stdlib/all_modules.py delete mode 100644 tests/stdlib/all_monkey.py delete mode 100644 tests/stdlib/test_SimpleHTTPServer.py delete mode 100644 tests/stdlib/test_asynchat.py delete mode 100644 tests/stdlib/test_asyncore.py delete mode 100644 tests/stdlib/test_ftplib.py delete mode 100644 tests/stdlib/test_httplib.py delete mode 100644 tests/stdlib/test_httpservers.py delete mode 100644 tests/stdlib/test_os.py delete mode 100644 tests/stdlib/test_queue.py delete mode 100644 tests/stdlib/test_select.py delete mode 100644 tests/stdlib/test_socket.py delete mode 100644 tests/stdlib/test_socket_ssl.py delete mode 100644 tests/stdlib/test_socketserver.py delete mode 100644 tests/stdlib/test_ssl.py delete mode 100644 tests/stdlib/test_subprocess.py delete mode 100644 tests/stdlib/test_thread.py delete mode 100644 tests/stdlib/test_thread__boundedsem.py delete mode 100644 tests/stdlib/test_threading.py delete mode 100644 tests/stdlib/test_threading_local.py delete mode 100644 tests/stdlib/test_timeout.py delete mode 100644 tests/stdlib/test_urllib.py delete mode 100644 tests/stdlib/test_urllib2.py delete mode 100644 tests/stdlib/test_urllib2_localnet.py diff --git a/doc/testing.rst b/doc/testing.rst index 30f045fd7e..12da9dae4f 100644 --- a/doc/testing.rst +++ b/doc/testing.rst @@ -34,21 +34,6 @@ To run the doctests included in many of the eventlet modules, use this command: Currently there are 16 doctests. -Standard Library Tests ----------------------- - -Eventlet provides the ability to test itself with the standard Python networking tests. This verifies that the libraries it wraps work at least as well as the standard ones do. 
The directory tests/stdlib contains a bunch of stubs that import the standard lib tests from your system and run them. If you do not have any tests in your python distribution, they'll simply fail to import. - -There's a convenience module called all.py designed to handle the impedance mismatch between Nose and the standard tests: - -.. code-block:: sh - - $ nosetests tests/stdlib/all.py - -That will run all the tests, though the output will be a little weird because it will look like Nose is running about 20 tests, each of which consists of a bunch of sub-tests. Not all test modules are present in all versions of Python, so there will be an occasional printout of "Not importing %s, it doesn't exist in this installation/version of Python". - -If you see "Ran 0 tests in 0.001s", it means that your Python installation lacks its own tests. This is usually the case for Linux distributions. One way to get the missing tests is to download a source tarball (of the same version you have installed on your system!) and copy its Lib/test directory into the correct place on your PYTHONPATH. - Testing Eventlet Hubs --------------------- diff --git a/tests/stdlib/all.py b/tests/stdlib/all.py deleted file mode 100644 index adf791f231..0000000000 --- a/tests/stdlib/all.py +++ /dev/null @@ -1,50 +0,0 @@ -""" Convenience module for running standard library tests with nose. The standard -tests are not especially homogeneous, but they mostly expose a test_main method that -does the work of selecting which tests to run based on what is supported by the -platform. On its own, Nose would run all possible tests and many would fail; therefore -we collect all of the test_main methods here in one module and Nose can run it. - -Hopefully in the future the standard tests get rewritten to be more nosey. - -Many of these tests make connections to external servers, and all.py tries to skip these -tests rather than failing them, so you can get some work done on a plane. 
-""" - -import eventlet.hubs -import eventlet.debug -eventlet.debug.hub_prevent_multiple_readers(False) - - -def restart_hub(): - hub = eventlet.hubs.get_hub() - hub_name = hub.__module__ - hub.abort() - eventlet.hubs.use_hub(hub_name) - - -def assimilate_patched(name): - try: - modobj = __import__(name, globals(), locals(), ['test_main']) - restart_hub() - except ImportError: - print("Not importing %s, it doesn't exist in this installation/version of Python" % name) - return - else: - method_name = name + "_test_main" - try: - test_method = modobj.test_main - - def test_main(): - restart_hub() - test_method() - restart_hub() - globals()[method_name] = test_main - test_main.__name__ = name + '.test_main' - except AttributeError: - print("No test_main for %s, assuming it tests on import" % name) - - -import all_modules - -for m in all_modules.get_modules(): - assimilate_patched(m) diff --git a/tests/stdlib/all_modules.py b/tests/stdlib/all_modules.py deleted file mode 100644 index ec8a5f8113..0000000000 --- a/tests/stdlib/all_modules.py +++ /dev/null @@ -1,40 +0,0 @@ -def get_modules(): - test_modules = [ - 'test_select', - 'test_SimpleHTTPServer', - 'test_asynchat', - 'test_asyncore', - 'test_ftplib', - 'test_httplib', - 'test_os', - 'test_queue', - 'test_socket_ssl', - 'test_socketserver', - # 'test_subprocess', - 'test_thread', - 'test_threading', - 'test_threading_local', - 'test_urllib', - 'test_urllib2_localnet'] - - network_modules = [ - 'test_httpservers', - 'test_socket', - 'test_ssl', - 'test_timeout', - 'test_urllib2'] - - # quick and dirty way of testing whether we can access - # remote hosts; any tests that try internet connections - # will fail if we cannot - import socket - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - try: - s.settimeout(0.5) - s.connect(('eventlet.net', 80)) - s.close() - test_modules = test_modules + network_modules - except socket.error as e: - print("Skipping network tests") - - return test_modules diff --git 
a/tests/stdlib/all_monkey.py b/tests/stdlib/all_monkey.py deleted file mode 100644 index f6e901c7fd..0000000000 --- a/tests/stdlib/all_monkey.py +++ /dev/null @@ -1,26 +0,0 @@ -import eventlet -eventlet.sleep(0) -from eventlet import patcher -patcher.monkey_patch() - - -def assimilate_real(name): - print("Assimilating", name) - try: - modobj = __import__('test.' + name, globals(), locals(), ['test_main']) - except ImportError: - print("Not importing %s, it doesn't exist in this installation/version of Python" % name) - return - else: - method_name = name + "_test_main" - try: - globals()[method_name] = modobj.test_main - modobj.test_main.__name__ = name + '.test_main' - except AttributeError: - print("No test_main for %s, assuming it tests on import" % name) - - -import all_modules - -for m in all_modules.get_modules(): - assimilate_real(m) diff --git a/tests/stdlib/test_SimpleHTTPServer.py b/tests/stdlib/test_SimpleHTTPServer.py deleted file mode 100644 index 312ec583e4..0000000000 --- a/tests/stdlib/test_SimpleHTTPServer.py +++ /dev/null @@ -1,10 +0,0 @@ -from eventlet import patcher -from eventlet.green import SimpleHTTPServer - -patcher.inject( - 'test.test_SimpleHTTPServer', - globals(), - ('SimpleHTTPServer', SimpleHTTPServer)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_asynchat.py b/tests/stdlib/test_asynchat.py deleted file mode 100644 index 3d96bc3a25..0000000000 --- a/tests/stdlib/test_asynchat.py +++ /dev/null @@ -1,20 +0,0 @@ -from eventlet import patcher -from eventlet.green import asyncore -from eventlet.green import asynchat -from eventlet.green import socket -from eventlet.green import thread -from eventlet.green import threading -from eventlet.green import time - -patcher.inject( - "test.test_asynchat", - globals(), - ('asyncore', asyncore), - ('asynchat', asynchat), - ('socket', socket), - ('thread', thread), - ('threading', threading), - ('time', time)) - -if __name__ == "__main__": - test_main() diff --git 
a/tests/stdlib/test_asyncore.py b/tests/stdlib/test_asyncore.py deleted file mode 100644 index d8acdb131e..0000000000 --- a/tests/stdlib/test_asyncore.py +++ /dev/null @@ -1,54 +0,0 @@ -from eventlet import patcher -from eventlet.green import asyncore -from eventlet.green import select -from eventlet.green import socket -from eventlet.green import threading -from eventlet.green import time - -patcher.inject("test.test_asyncore", globals()) - - -def new_closeall_check(self, usedefault): - # Check that close_all() closes everything in a given map - - l = [] - testmap = {} - for i in range(10): - c = dummychannel() - l.append(c) - self.assertEqual(c.socket.closed, False) - testmap[i] = c - - if usedefault: - # the only change we make is to not assign to asyncore.socket_map - # because doing so fails to assign to the real asyncore's socket_map - # and thus the test fails - socketmap = asyncore.socket_map.copy() - try: - asyncore.socket_map.clear() - asyncore.socket_map.update(testmap) - asyncore.close_all() - finally: - testmap = asyncore.socket_map.copy() - asyncore.socket_map.clear() - asyncore.socket_map.update(socketmap) - else: - asyncore.close_all(testmap) - - self.assertEqual(len(testmap), 0) - - for c in l: - self.assertEqual(c.socket.closed, True) - - -HelperFunctionTests.closeall_check = new_closeall_check - -try: - # Eventlet's select() emulation doesn't support the POLLPRI flag, - # which this test relies on. Therefore, nuke it! 
- BaseTestAPI.test_handle_expt = lambda *a, **kw: None -except NameError: - pass - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_ftplib.py b/tests/stdlib/test_ftplib.py deleted file mode 100644 index 6dc2d7630f..0000000000 --- a/tests/stdlib/test_ftplib.py +++ /dev/null @@ -1,10 +0,0 @@ -from eventlet import patcher -from eventlet.green import asyncore -from eventlet.green import ftplib -from eventlet.green import threading -from eventlet.green import socket - -patcher.inject('test.test_ftplib', globals()) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_httplib.py b/tests/stdlib/test_httplib.py deleted file mode 100644 index e8bf09d3c9..0000000000 --- a/tests/stdlib/test_httplib.py +++ /dev/null @@ -1,12 +0,0 @@ -from eventlet import patcher -from eventlet.green import httplib -from eventlet.green import socket - -patcher.inject( - 'test.test_httplib', - globals(), - ('httplib', httplib), - ('socket', socket)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_httpservers.py b/tests/stdlib/test_httpservers.py deleted file mode 100644 index 90ed27e9d1..0000000000 --- a/tests/stdlib/test_httpservers.py +++ /dev/null @@ -1,21 +0,0 @@ -from eventlet import patcher - -from eventlet.green import BaseHTTPServer -from eventlet.green import SimpleHTTPServer -from eventlet.green import CGIHTTPServer -from eventlet.green import urllib -from eventlet.green import httplib -from eventlet.green import threading - -patcher.inject( - 'test.test_httpservers', - globals(), - ('BaseHTTPServer', BaseHTTPServer), - ('SimpleHTTPServer', SimpleHTTPServer), - ('CGIHTTPServer', CGIHTTPServer), - ('urllib', urllib), - ('httplib', httplib), - ('threading', threading)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_os.py b/tests/stdlib/test_os.py deleted file mode 100644 index 9dc5506ed5..0000000000 --- a/tests/stdlib/test_os.py +++ /dev/null @@ -1,10 +0,0 @@ -from eventlet import patcher 
-from eventlet.green import os - -patcher.inject( - 'test.test_os', - globals(), - ('os', os)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_queue.py b/tests/stdlib/test_queue.py deleted file mode 100644 index 5d6acc7cb9..0000000000 --- a/tests/stdlib/test_queue.py +++ /dev/null @@ -1,14 +0,0 @@ -from eventlet import patcher -from eventlet.green import Queue -from eventlet.green import threading -from eventlet.green import time - -patcher.inject( - 'test.test_queue', - globals(), - ('Queue', Queue), - ('threading', threading), - ('time', time)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_select.py b/tests/stdlib/test_select.py deleted file mode 100644 index 55e1d6a95d..0000000000 --- a/tests/stdlib/test_select.py +++ /dev/null @@ -1,11 +0,0 @@ -from eventlet import patcher -from eventlet.green import select - - -patcher.inject( - 'test.test_select', - globals(), - ('select', select)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_socket.py b/tests/stdlib/test_socket.py deleted file mode 100644 index 15df53c2bc..0000000000 --- a/tests/stdlib/test_socket.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python - -from eventlet import patcher -from eventlet.green import socket -from eventlet.green import select -from eventlet.green import time -from eventlet.green import thread -from eventlet.green import threading - -patcher.inject( - 'test.test_socket', - globals(), - ('socket', socket), - ('select', select), - ('time', time), - ('thread', thread), - ('threading', threading)) - -# TODO: fix -TCPTimeoutTest.testInterruptedTimeout = lambda *a: None - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_socket_ssl.py b/tests/stdlib/test_socket_ssl.py deleted file mode 100644 index 3d9c07641a..0000000000 --- a/tests/stdlib/test_socket_ssl.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python - -from eventlet import patcher -from eventlet.green import socket - 
-# enable network resource -import test.test_support -i_r_e = test.test_support.is_resource_enabled - - -def is_resource_enabled(resource): - if resource == 'network': - return True - else: - return i_r_e(resource) - - -test.test_support.is_resource_enabled = is_resource_enabled - -try: - socket.ssl - socket.sslerror -except AttributeError: - raise ImportError("Socket module doesn't support ssl") - -patcher.inject('test.test_socket_ssl', globals()) - -test_basic = patcher.patch_function(test_basic) -test_rude_shutdown = patcher.patch_function(test_rude_shutdown) - - -def test_main(): - if not hasattr(socket, "ssl"): - raise test_support.TestSkipped("socket module has no ssl support") - test_rude_shutdown() - test_basic() - test_timeout() - - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_socketserver.py b/tests/stdlib/test_socketserver.py deleted file mode 100644 index e5f5d55106..0000000000 --- a/tests/stdlib/test_socketserver.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python - -from eventlet import patcher -from eventlet.green import SocketServer -from eventlet.green import socket -from eventlet.green import select -from eventlet.green import time -from eventlet.green import threading - -# to get past the silly 'requires' check -from test import test_support -test_support.use_resources = ['network'] - -patcher.inject( - 'test.test_socketserver', - globals(), - ('SocketServer', SocketServer), - ('socket', socket), - ('select', select), - ('time', time), - ('threading', threading)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_ssl.py b/tests/stdlib/test_ssl.py deleted file mode 100644 index 44cf62f99a..0000000000 --- a/tests/stdlib/test_ssl.py +++ /dev/null @@ -1,57 +0,0 @@ -from eventlet import patcher -from eventlet.green import asyncore -from eventlet.green import BaseHTTPServer -from eventlet.green import select -from eventlet.green import socket -from eventlet.green import SocketServer -from 
eventlet.green import SimpleHTTPServer -from eventlet.green import ssl -from eventlet.green import threading -from eventlet.green import urllib - -# stupid test_support messing with our mojo -import test.test_support -i_r_e = test.test_support.is_resource_enabled - - -def is_resource_enabled(resource): - if resource == 'network': - return True - else: - return i_r_e(resource) - - -test.test_support.is_resource_enabled = is_resource_enabled - -patcher.inject( - 'test.test_ssl', - globals(), - ('asyncore', asyncore), - ('BaseHTTPServer', BaseHTTPServer), - ('select', select), - ('socket', socket), - ('SocketServer', SocketServer), - ('ssl', ssl), - ('threading', threading), - ('urllib', urllib)) - - -# TODO svn.python.org stopped serving up the cert that these tests expect; -# presumably they've updated svn trunk but the tests in released versions will -# probably break forever. This is why you don't write tests that connect to -# external servers. -NetworkedTests.testConnect = lambda s: None -NetworkedTests.testFetchServerCert = lambda s: None -NetworkedTests.test_algorithms = lambda s: None - -# these don't pass because nonblocking ssl sockets don't report -# when the socket is closed uncleanly, per the docstring on -# eventlet.green.GreenSSLSocket -# *TODO: fix and restore these tests -ThreadedTests.testProtocolSSL2 = lambda s: None -ThreadedTests.testProtocolSSL3 = lambda s: None -ThreadedTests.testProtocolTLS1 = lambda s: None -ThreadedTests.testSocketServer = lambda s: None - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_subprocess.py b/tests/stdlib/test_subprocess.py deleted file mode 100644 index d62cbc7890..0000000000 --- a/tests/stdlib/test_subprocess.py +++ /dev/null @@ -1,12 +0,0 @@ -from eventlet import patcher -from eventlet.green import subprocess -from eventlet.green import time - -patcher.inject( - 'test.test_subprocess', - globals(), - ('subprocess', subprocess), - ('time', time)) - -if __name__ == "__main__": - 
test_main() diff --git a/tests/stdlib/test_thread.py b/tests/stdlib/test_thread.py deleted file mode 100644 index 1df908f386..0000000000 --- a/tests/stdlib/test_thread.py +++ /dev/null @@ -1,15 +0,0 @@ -from eventlet import patcher -from eventlet.green import thread -from eventlet.green import time - - -patcher.inject('test.test_thread', globals()) - -try: - # this is a new test in 2.7 that we don't support yet - TestForkInThread.test_forkinthread = lambda *a, **kw: None -except NameError: - pass - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_thread__boundedsem.py b/tests/stdlib/test_thread__boundedsem.py deleted file mode 100644 index 3fc4ee6729..0000000000 --- a/tests/stdlib/test_thread__boundedsem.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Test that BoundedSemaphore with a very high bound is as good as unbounded one""" -from eventlet import semaphore -from eventlet.green import thread - - -def allocate_lock(): - return semaphore.Semaphore(1, 9999) - - -original_allocate_lock = thread.allocate_lock -thread.allocate_lock = allocate_lock -original_LockType = thread.LockType -thread.LockType = semaphore.CappedSemaphore - -try: - import os.path - execfile(os.path.join(os.path.dirname(__file__), 'test_thread.py')) -finally: - thread.allocate_lock = original_allocate_lock - thread.LockType = original_LockType diff --git a/tests/stdlib/test_threading.py b/tests/stdlib/test_threading.py deleted file mode 100644 index 5729163aa6..0000000000 --- a/tests/stdlib/test_threading.py +++ /dev/null @@ -1,46 +0,0 @@ -from eventlet import patcher -from eventlet.green import threading -from eventlet.green import thread -from eventlet.green import time - -# *NOTE: doesn't test as much of the threading api as we'd like because many of -# the tests are launched via subprocess and therefore don't get patched - -patcher.inject('test.test_threading', - globals()) - -# "PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently) -# exposed at the Python 
level. This test relies on ctypes to get at it." -# Therefore it's also disabled when testing eventlet, as it's not emulated. -try: - ThreadTests.test_PyThreadState_SetAsyncExc = lambda s: None -except (AttributeError, NameError): - pass - -# disabling this test because it fails when run in Hudson even though it always -# succeeds when run manually -try: - ThreadJoinOnShutdown.test_3_join_in_forked_from_thread = lambda *a, **kw: None -except (AttributeError, NameError): - pass - -# disabling this test because it relies on dorking with the hidden -# innards of the threading module in a way that doesn't appear to work -# when patched -try: - ThreadTests.test_limbo_cleanup = lambda *a, **kw: None -except (AttributeError, NameError): - pass - -# this test has nothing to do with Eventlet; if it fails it's not -# because of patching (which it does, grump grump) -try: - ThreadTests.test_finalize_runnning_thread = lambda *a, **kw: None - # it's misspelled in the stdlib, silencing this version as well because - # inevitably someone will correct the error - ThreadTests.test_finalize_running_thread = lambda *a, **kw: None -except (AttributeError, NameError): - pass - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_threading_local.py b/tests/stdlib/test_threading_local.py deleted file mode 100644 index 6b923a6a91..0000000000 --- a/tests/stdlib/test_threading_local.py +++ /dev/null @@ -1,18 +0,0 @@ -from eventlet import patcher -from eventlet.green import thread -from eventlet.green import threading -from eventlet.green import time - -# hub requires initialization before test can run -from eventlet import hubs -hubs.get_hub() - -patcher.inject( - 'test.test_threading_local', - globals(), - ('time', time), - ('thread', thread), - ('threading', threading)) - -if __name__ == '__main__': - test_main() diff --git a/tests/stdlib/test_timeout.py b/tests/stdlib/test_timeout.py deleted file mode 100644 index 08b7ba0114..0000000000 --- 
a/tests/stdlib/test_timeout.py +++ /dev/null @@ -1,16 +0,0 @@ -from eventlet import patcher -from eventlet.green import socket -from eventlet.green import time - -patcher.inject( - 'test.test_timeout', - globals(), - ('socket', socket), - ('time', time)) - -# to get past the silly 'requires' check -from test import test_support -test_support.use_resources = ['network'] - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_urllib.py b/tests/stdlib/test_urllib.py deleted file mode 100644 index 175fd2f01b..0000000000 --- a/tests/stdlib/test_urllib.py +++ /dev/null @@ -1,12 +0,0 @@ -from eventlet import patcher -from eventlet.green import httplib -from eventlet.green import urllib - -patcher.inject( - 'test.test_urllib', - globals(), - ('httplib', httplib), - ('urllib', urllib)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_urllib2.py b/tests/stdlib/test_urllib2.py deleted file mode 100644 index edbdd21f88..0000000000 --- a/tests/stdlib/test_urllib2.py +++ /dev/null @@ -1,18 +0,0 @@ -from eventlet import patcher -from eventlet.green import socket -from eventlet.green import urllib2 - -patcher.inject( - 'test.test_urllib2', - globals(), - ('socket', socket), - ('urllib2', urllib2)) - -HandlerTests.test_file = patcher.patch_function(HandlerTests.test_file, ('socket', socket)) -HandlerTests.test_cookie_redirect = patcher.patch_function( - HandlerTests.test_cookie_redirect, ('urllib2', urllib2)) -OpenerDirectorTests.test_badly_named_methods = patcher.patch_function( - OpenerDirectorTests.test_badly_named_methods, ('urllib2', urllib2)) - -if __name__ == "__main__": - test_main() diff --git a/tests/stdlib/test_urllib2_localnet.py b/tests/stdlib/test_urllib2_localnet.py deleted file mode 100644 index c8e200da19..0000000000 --- a/tests/stdlib/test_urllib2_localnet.py +++ /dev/null @@ -1,17 +0,0 @@ -from eventlet import patcher - -from eventlet.green import BaseHTTPServer -from eventlet.green import threading -from 
eventlet.green import socket -from eventlet.green import urllib2 - -patcher.inject( - 'test.test_urllib2_localnet', - globals(), - ('BaseHTTPServer', BaseHTTPServer), - ('threading', threading), - ('socket', socket), - ('urllib2', urllib2)) - -if __name__ == "__main__": - test_main() From 1c2fb7a202b25d119064976752696ed76f7562c3 Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Thu, 14 Dec 2023 12:22:09 -0500 Subject: [PATCH 07/35] Replace nose with pytest (#836) * Use pytest instead of nose * Remove nose dependencies * "setup.py test" is deprecated * More removal of nose usage * Update to pytest * Make ipv6 tests use pytest * Make pytest happier with the name * Upgrade coverage.py which seems to help with the 3.11 errors * Link to doctests issue * Switch to name that pytest will detect as being a test module. * Expose tests to pytest * Fix style --------- Co-authored-by: Itamar Turner-Trauring --- doc/testing.rst | 28 ++++++------------- setup.py | 1 - tests/README | 9 ++---- tests/__init__.py | 15 +--------- tests/dagpool_test.py | 11 ++++++-- tests/greenio_test.py | 8 ++---- tests/nosewrapper.py | 20 ------------- tests/pools_test.py | 2 +- tests/test_infrastructure_tests.py | 15 ++++++++++ ...ment.py => timeout_with_statement_test.py} | 0 tox.ini | 18 ++++++------ 11 files changed, 49 insertions(+), 78 deletions(-) delete mode 100644 tests/nosewrapper.py create mode 100644 tests/test_infrastructure_tests.py rename tests/{timeout_test_with_statement.py => timeout_with_statement_test.py} (100%) diff --git a/doc/testing.rst b/doc/testing.rst index 12da9dae4f..88608587aa 100644 --- a/doc/testing.rst +++ b/doc/testing.rst @@ -1,25 +1,13 @@ Testing Eventlet ================ -Eventlet is tested using `Nose `_. To run tests, simply install nose, and then, in the eventlet tree, do: +Eventlet is tested using `Pytest `_. To run tests, simply install pytest, and then, in the eventlet tree, do: .. 
code-block:: sh - $ python setup.py test + $ pytest -If you want access to all the nose plugins via command line, you can run: - -.. code-block:: sh - - $ python setup.py nosetests - -Lastly, you can just use nose directly if you want: - -.. code-block:: sh - - $ nosetests - -That's it! The output from running nose is the same as unittest's output, if the entire directory was one big test file. +That's it! Many tests are skipped based on environmental factors; for example, it makes no sense to test kqueue-specific functionality when your OS does not support it. These are printed as S's during execution, and in the summary printed after the tests run it will tell you how many were skipped. @@ -30,9 +18,9 @@ To run the doctests included in many of the eventlet modules, use this command: .. code-block :: sh - $ nosetests --with-doctest eventlet/*.py + $ pytest --doctest-modules eventlet/ -Currently there are 16 doctests. +The doctests currently `do not pass `_. Testing Eventlet Hubs @@ -42,7 +30,7 @@ When you run the tests, Eventlet will use the most appropriate hub for the curre .. code-block:: sh - $ EVENTLET_HUB=epolls nosetests + $ EVENTLET_HUB=epolls pytest See :ref:`understanding_hubs` for the full list of hubs. @@ -62,11 +50,11 @@ If you are writing a test that involves a client connecting to a spawned server, Coverage -------- -Coverage.py is an awesome tool for evaluating how much code was exercised by unit tests. Nose supports it if both are installed, so it's easy to generate coverage reports for eventlet. Here's how: +Coverage.py is an awesome tool for evaluating how much code was exercised by unit tests. pytest supports it pytest-cov is installed, so it's easy to generate coverage reports for eventlet. Here's how: .. code-block:: sh - nosetests --with-coverage --cover-package=eventlet + pytest --cov=eventlet After running the tests to completion, this will emit a huge wodge of module names and line numbers. 
For some reason, the ``--cover-inclusive`` option breaks everything rather than serving its purpose of limiting the coverage to the local files, so don't use that. diff --git a/setup.py b/setup.py index f5ed3d4725..9ea30017f4 100644 --- a/setup.py +++ b/setup.py @@ -31,7 +31,6 @@ 'README.rst' ) ).read(), - test_suite='nose.collector', classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", diff --git a/tests/README b/tests/README index 4e28ae30fd..a1c3987f22 100644 --- a/tests/README +++ b/tests/README @@ -1,7 +1,4 @@ -The tests are intended to be run using Nose. -http://somethingaboutorange.com/mrl/projects/nose/ +The tests are intended to be run using pytest. -To run tests, simply install nose, and then, in the eventlet tree, do: - $ nosetests - -That's it! Its output is the same as unittest's output. It tends to emit a lot of tracebacks from various poorly-behaving tests, but they still (generally) pass. \ No newline at end of file +To run tests, simply install pytest, and then, in the eventlet tree, do: + $ pytest diff --git a/tests/__init__.py b/tests/__init__.py index 24a82e5263..6a6512aca7 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -20,7 +20,7 @@ import unittest import warnings -from nose.plugins.skip import SkipTest +from unittest import SkipTest import eventlet from eventlet import tpool @@ -213,7 +213,6 @@ def assert_less_than_equal(self, a, b, msg=None): def check_idle_cpu_usage(duration, allowed_part): if resource is None: # TODO: use https://code.google.com/p/psutil/ - from nose.plugins.skip import SkipTest raise SkipTest('CPU usage testing not supported (`import resource` failed)') r1 = resource.getrusage(resource.RUSAGE_SELF) @@ -392,18 +391,6 @@ def capture_stderr(): private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key') -def test_run_python_timeout(): - output = run_python('', args=('-c', 'import time; time.sleep(0.5)'), timeout=0.1) - assert output.endswith(b'FAIL - timed out') - - 
-def test_run_python_pythonpath_extend(): - code = '''import os, sys ; print('\\n'.join(sys.path))''' - output = run_python('', args=('-c', code), pythonpath_extend=('dira', 'dirb')) - assert b'/dira\n' in output - assert b'/dirb\n' in output - - @contextlib.contextmanager def dns_tcp_server(ip_to_give, request_count=1): state = [0] # request count storage writable by thread diff --git a/tests/dagpool_test.py b/tests/dagpool_test.py index fdc23e7d7e..0b5a745c60 100644 --- a/tests/dagpool_test.py +++ b/tests/dagpool_test.py @@ -5,7 +5,6 @@ @brief Test DAGPool class """ -from nose.tools import * import eventlet from eventlet.dagpool import DAGPool, Collision, PropagateError import six @@ -13,6 +12,14 @@ import itertools +def assert_equals(a, b): + """Backwards compatibility so we don't have to touch a bunch of tests.""" + assert a == b + + +assert_equal = assert_equals + + # Not all versions of nose.tools.assert_raises() support the usage in this # module, but it's straightforward enough to code that explicitly. 
@contextmanager @@ -375,7 +382,7 @@ def test_spawn_multiple(): dict(a=1, b=2, c=3, d="dval", e="eval", f="fval", g="gval", h="hval")) assert_equals(pool.running(), 0) - assert_false(pool.running_keys()) + assert not pool.running_keys() assert_equals(pool.waiting(), 0) assert_equals(pool.waiting_for("h"), set()) diff --git a/tests/greenio_test.py b/tests/greenio_test.py index 318ac80f85..5f3f916e02 100644 --- a/tests/greenio_test.py +++ b/tests/greenio_test.py @@ -9,8 +9,6 @@ import sys import tempfile -from nose.tools import eq_ - import eventlet from eventlet import event, greenio, debug from eventlet.hubs import get_hub @@ -39,7 +37,7 @@ def expect_socket_timeout(function, *args): raise AssertionError("socket.timeout not raised") except socket.timeout as e: assert hasattr(e, 'args') - eq_(e.args[0], 'timed out') + assert e.args[0] == 'timed out' def min_buf_size(): @@ -672,8 +670,8 @@ def test_datagram_socket_operations_work(self): sender.sendto(b'second', 0, address) sender_address = ('127.0.0.1', sender.getsockname()[1]) - eq_(receiver.recvfrom(1024), (b'first', sender_address)) - eq_(receiver.recvfrom(1024), (b'second', sender_address)) + assert receiver.recvfrom(1024) == (b'first', sender_address) + assert receiver.recvfrom(1024) == (b'second', sender_address) def test_get_fileno_of_a_socket_works(): diff --git a/tests/nosewrapper.py b/tests/nosewrapper.py deleted file mode 100644 index 5837d4300d..0000000000 --- a/tests/nosewrapper.py +++ /dev/null @@ -1,20 +0,0 @@ -""" This script simply gets the paths correct for testing eventlet with the -hub extension for Nose.""" -import nose -from os.path import dirname, realpath, abspath -import sys - - -parent_dir = dirname(dirname(realpath(abspath(__file__)))) -if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) - -# hudson does a better job printing the test results if the exit value is 0 -zero_status = '--force-zero-status' -if zero_status in sys.argv: - sys.argv.remove(zero_status) - launch = nose.run 
-else: - launch = nose.main - -launch(argv=sys.argv) diff --git a/tests/pools_test.py b/tests/pools_test.py index 32e971e252..080cc56ed1 100644 --- a/tests/pools_test.py +++ b/tests/pools_test.py @@ -245,7 +245,7 @@ def test_it(self): SOMETIMES = RuntimeError('I fail half the time') -class TestTookTooLong(Exception): +class TookTooLongToRunTest(Exception): pass diff --git a/tests/test_infrastructure_tests.py b/tests/test_infrastructure_tests.py new file mode 100644 index 0000000000..27ee71ce9d --- /dev/null +++ b/tests/test_infrastructure_tests.py @@ -0,0 +1,15 @@ +"""Tests for the testing infrastructure.""" + +from . import run_python + + +def test_run_python_timeout(): + output = run_python('', args=('-c', 'import time; time.sleep(0.5)'), timeout=0.1) + assert output.endswith(b'FAIL - timed out') + + +def test_run_python_pythonpath_extend(): + code = '''import os, sys ; print('\\n'.join(sys.path))''' + output = run_python('', args=('-c', code), pythonpath_extend=('dira', 'dirb')) + assert b'/dira\n' in output + assert b'/dirb\n' in output diff --git a/tests/timeout_test_with_statement.py b/tests/timeout_with_statement_test.py similarity index 100% rename from tests/timeout_test_with_statement.py rename to tests/timeout_with_statement_test.py diff --git a/tox.ini b/tox.ini index 23fbbf3362..7832199ec4 100644 --- a/tox.ini +++ b/tox.ini @@ -30,14 +30,13 @@ setenv = {[testenv]setenv} eventlet_test_ipv6 = 1 deps = - coverage==4.5.1 - nose3==1.3.8 + coverage==7.3.3 + pytest + pytest-cov usedevelop = True commands = pip install -e . 
- nosetests --verbose {env:tox_cover_args} \ - tests.backdoor_test:BackdoorTest.test_server_on_ipv6_socket \ - tests.wsgi_test:TestHttpd.test_ipv6 + pytest --verbose {env:tox_cover_args} -k "ipv6" tests/ coverage xml -i [testenv:pep8] @@ -59,10 +58,11 @@ setenv = selects: EVENTLET_HUB = selects poll: EVENTLET_HUB = poll epolls: EVENTLET_HUB = epolls - tox_cover_args = --with-coverage --cover-erase --cover-package=eventlet + tox_cover_args = --cov=eventlet deps = - coverage==4.5.1 - nose3==1.3.8 + coverage==7.3.3 + pytest + pytest-cov py38-openssl: pyopenssl==22.1.0 pypy3: psycopg2cffi-compat==1.1 py{38,39}-{selects,poll,epolls}: pyzmq==21.0.2 @@ -74,5 +74,5 @@ deps = usedevelop = True commands = pip install -e . - nosetests --verbose {env:tox_cover_args} {posargs:tests/} + pytest --verbose {env:tox_cover_args} {posargs:tests/} coverage xml -i From d78f8f67f2027d5bc141c3078be626f2c9edc095 Mon Sep 17 00:00:00 2001 From: shoito <37051+shoito@users.noreply.github.com> Date: Fri, 15 Dec 2023 18:07:02 +0900 Subject: [PATCH 08/35] fix GitHub workflow badge URL (#815) See https://github.com/badges/shields/issues/8671 --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index a3e0629802..365062145e 100644 --- a/README.rst +++ b/README.rst @@ -73,7 +73,7 @@ Flair .. image:: https://img.shields.io/pypi/v/eventlet :target: https://pypi.org/project/eventlet/ -.. image:: https://img.shields.io/github/workflow/status/eventlet/eventlet/test/master +.. image:: https://img.shields.io/github/actions/workflow/status/eventlet/eventlet/test.yaml?branch=master :target: https://github.com/eventlet/eventlet/actions?query=workflow%3Atest+branch%3Amaster .. 
image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg From e51f72a60c868b7b88e2c5758b5c65d6f0ab446d Mon Sep 17 00:00:00 2001 From: nat-goodspeed Date: Fri, 15 Dec 2023 14:55:13 -0500 Subject: [PATCH 09/35] Fixes Python 3.12 compatilibity #817 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update unittest asserts to support Python 3.12+ Related to https://github.com/python/cpython/issues/89325 * Update green.http.client for changes in Python 3.12 Related to https://github.com/python/cpython/commit/f0b234e6ed83e810bd9844e744f5e22aa538a356 Co-Authored-By: Victor Stinner * Set green.thred.daemon_threads_allowed for Python 3.12+ support Related to https://github.com/python/cpython/commit/4702552885811d0af8f0e4545f494336801ad4dd * Python 3.12+ only: Adjust for removal of ssl.wrap_socket() Related to https://github.com/python/cpython/commit/00464bbed66e5f64bdad7f930b315a88d5afccae * Python 3.12 fixes by hroncok This PR attempts to add compatibility for older Python versions. We draw the line at Python versions that predate ssl.SSLContext, though. The remaining Python 2.7 documentation doesn't even mention the version at which that was introduced. * Allow access to global __ssl within class definition. * Remember whether original ssl module has wrap_socket() function. * Replace a few assertTrue() calls with assert statements. * In GreenSSLSocket.__new__(), use cls, not self. 
* Add 3.12 * Add newer versions * Warnings don't mean the test should fail * Fix syntax error * Style fixes * Simplify given we only support 3.8+ * Document 3.12 support --------- Co-authored-by: Miro HronĨok Co-authored-by: Victor Stinner Co-authored-by: Itamar Turner-Trauring Co-authored-by: Itamar Turner-Trauring --- .github/workflows/test.yaml | 1 + README.rst | 2 +- eventlet/green/http/client.py | 29 +++++-- eventlet/green/ssl.py | 139 +++++++++++++++++-------------- eventlet/green/thread.py | 3 + setup.py | 1 + tests/__init__.py | 5 +- tests/dagpool_test.py | 149 +++++++++++++++++----------------- tests/tpool_test.py | 8 +- tox.ini | 2 +- 10 files changed, 186 insertions(+), 153 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 11949e9b14..a3d6324aaf 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -42,6 +42,7 @@ jobs: - { py: "3.10", toxenv: py310-selects, ignore-error: false, os: ubuntu-latest } - { py: "3.10", toxenv: ipv6, ignore-error: false, os: ubuntu-latest } - { py: "3.11", toxenv: py311-epolls, ignore-error: false, os: ubuntu-latest } + - { py: "3.12", toxenv: py312-epolls, ignore-error: false, os: ubuntu-latest } - { py: pypy3.9, toxenv: pypy3-epolls, ignore-error: true, os: ubuntu-20.04 } steps: diff --git a/README.rst b/README.rst index 365062145e..78d46087b0 100644 --- a/README.rst +++ b/README.rst @@ -65,7 +65,7 @@ Apologies for any inconvenience. Supported Python versions ========================= -Python 3.8-3.11 are currently supported. +Python 3.8-3.12 are currently supported. 
Flair ===== diff --git a/eventlet/green/http/client.py b/eventlet/green/http/client.py index 77256497d5..3399333e5f 100644 --- a/eventlet/green/http/client.py +++ b/eventlet/green/http/client.py @@ -74,13 +74,13 @@ | ( putheader() )* endheaders() v Request-sent - |\_____________________________ + |\\_____________________________ | | getresponse() raises | response = getresponse() | ConnectionError v v Unread-response Idle [Response-headers-read] - |\____________________ + |\\____________________ | | | response.read() | putrequest() v v @@ -1450,6 +1450,22 @@ def getresponse(self): except ImportError: pass else: + def _create_https_context(http_version): + # Function also used by urllib.request to be able to set the check_hostname + # attribute on a context object. + context = ssl._create_default_https_context() + # send ALPN extension to indicate HTTP/1.1 protocol + if http_version == 11: + context.set_alpn_protocols(['http/1.1']) + # enable PHA for TLS 1.3 connections if available + if context.post_handshake_auth is not None: + context.post_handshake_auth = True + return context + + def _populate_https_context(context, check_hostname): + if check_hostname is not None: + context.check_hostname = check_hostname + class HTTPSConnection(HTTPConnection): "This class allows communication via SSL." 
@@ -1466,13 +1482,8 @@ def __init__(self, host, port=None, key_file=None, cert_file=None, self.key_file = key_file self.cert_file = cert_file if context is None: - context = ssl._create_default_https_context() - will_verify = context.verify_mode != ssl.CERT_NONE - if check_hostname is None: - check_hostname = context.check_hostname - if check_hostname and not will_verify: - raise ValueError("check_hostname needs a SSL context with " - "either CERT_OPTIONAL or CERT_REQUIRED") + context = _create_https_context(self._http_vsn) + _populate_https_context(context, check_hostname) if key_file or cert_file: context.load_cert_chain(cert_file, key_file) self._context = context diff --git a/eventlet/green/ssl.py b/eventlet/green/ssl.py index be4f29b578..29cbecbc65 100644 --- a/eventlet/green/ssl.py +++ b/eventlet/green/ssl.py @@ -6,7 +6,7 @@ import sys from eventlet import greenio, hubs from eventlet.greenio import ( - set_nonblocking, GreenSocket, CONNECT_ERR, CONNECT_SUCCESS, + GreenSocket, CONNECT_ERR, CONNECT_SUCCESS, ) from eventlet.hubs import trampoline, IOClosed from eventlet.support import get_errno, PY33 @@ -22,9 +22,9 @@ 'create_default_context', '_create_default_https_context'] _original_sslsocket = __ssl.SSLSocket -_original_wrap_socket = __ssl.wrap_socket -_original_sslcontext = getattr(__ssl, 'SSLContext', None) +_original_sslcontext = __ssl.SSLContext _is_under_py_3_7 = sys.version_info < (3, 7) +_original_wrap_socket = __ssl.SSLContext.wrap_socket @contextmanager @@ -76,7 +76,7 @@ def __new__(cls, sock=None, keyfile=None, certfile=None, session=kw.get('session'), ) else: - ret = _original_wrap_socket( + ret = cls._wrap_socket( sock=sock.fd, keyfile=keyfile, certfile=certfile, @@ -95,6 +95,26 @@ def __new__(cls, sock=None, keyfile=None, certfile=None, ret.__class__ = GreenSSLSocket return ret + @staticmethod + def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs, + ssl_version, ca_certs, do_handshake_on_connect, ciphers): + context = 
_original_sslcontext(protocol=ssl_version) + context.options |= cert_reqs + if certfile or keyfile: + context.load_cert_chain( + certfile=certfile, + keyfile=keyfile, + ) + if ca_certs: + context.load_verify_locations(ca_certs) + if ciphers: + context.set_ciphers(ciphers) + return context.wrap_socket( + sock=sock, + server_side=server_side, + do_handshake_on_connect=do_handshake_on_connect, + ) + # we are inheriting from SSLSocket because its constructor calls # do_handshake whose behavior we wish to override def __init__(self, sock, keyfile=None, certfile=None, @@ -436,58 +456,59 @@ def sslwrap_simple(sock, keyfile=None, certfile=None): return ssl_sock -if hasattr(__ssl, 'SSLContext'): - _original_sslcontext = __ssl.SSLContext - - class GreenSSLContext(_original_sslcontext): - __slots__ = () - - def wrap_socket(self, sock, *a, **kw): - return GreenSSLSocket(sock, *a, _context=self, **kw) - - # https://github.com/eventlet/eventlet/issues/371 - # Thanks to Gevent developers for sharing patch to this problem. - if hasattr(_original_sslcontext.options, 'setter'): - # In 3.6, these became properties. They want to access the - # property __set__ method in the superclass, and they do so by using - # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey - # patch, which causes infinite recursion. 
- # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296 - @_original_sslcontext.options.setter - def options(self, value): - super(_original_sslcontext, _original_sslcontext).options.__set__(self, value) - - @_original_sslcontext.verify_flags.setter - def verify_flags(self, value): - super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value) - - @_original_sslcontext.verify_mode.setter - def verify_mode(self, value): - super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value) - - if hasattr(_original_sslcontext, "maximum_version"): - @_original_sslcontext.maximum_version.setter - def maximum_version(self, value): - super(_original_sslcontext, _original_sslcontext).maximum_version.__set__(self, value) - - if hasattr(_original_sslcontext, "minimum_version"): - @_original_sslcontext.minimum_version.setter - def minimum_version(self, value): - super(_original_sslcontext, _original_sslcontext).minimum_version.__set__(self, value) - - SSLContext = GreenSSLContext - - if hasattr(__ssl, 'create_default_context'): - _original_create_default_context = __ssl.create_default_context - - def green_create_default_context(*a, **kw): - # We can't just monkey-patch on the green version of `wrap_socket` - # on to SSLContext instances, but SSLContext.create_default_context - # does a bunch of work. 
Rather than re-implementing it all, just - # switch out the __class__ to get our `wrap_socket` implementation - context = _original_create_default_context(*a, **kw) - context.__class__ = GreenSSLContext - return context - - create_default_context = green_create_default_context - _create_default_https_context = green_create_default_context +class GreenSSLContext(_original_sslcontext): + __slots__ = () + + def wrap_socket(self, sock, *a, **kw): + return GreenSSLSocket(sock, *a, _context=self, **kw) + + # https://github.com/eventlet/eventlet/issues/371 + # Thanks to Gevent developers for sharing patch to this problem. + if hasattr(_original_sslcontext.options, 'setter'): + # In 3.6, these became properties. They want to access the + # property __set__ method in the superclass, and they do so by using + # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey + # patch, which causes infinite recursion. + # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296 + @_original_sslcontext.options.setter + def options(self, value): + super(_original_sslcontext, _original_sslcontext).options.__set__(self, value) + + @_original_sslcontext.verify_flags.setter + def verify_flags(self, value): + super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value) + + @_original_sslcontext.verify_mode.setter + def verify_mode(self, value): + super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value) + + if hasattr(_original_sslcontext, "maximum_version"): + @_original_sslcontext.maximum_version.setter + def maximum_version(self, value): + super(_original_sslcontext, _original_sslcontext).maximum_version.__set__(self, value) + + if hasattr(_original_sslcontext, "minimum_version"): + @_original_sslcontext.minimum_version.setter + def minimum_version(self, value): + super(_original_sslcontext, _original_sslcontext).minimum_version.__set__(self, value) + + +SSLContext = GreenSSLContext + + +# TODO: 
ssl.create_default_context() was added in 2.7.9. +# Not clear we're still trying to support Python versions even older than that. +if hasattr(__ssl, 'create_default_context'): + _original_create_default_context = __ssl.create_default_context + + def green_create_default_context(*a, **kw): + # We can't just monkey-patch on the green version of `wrap_socket` + # on to SSLContext instances, but SSLContext.create_default_context + # does a bunch of work. Rather than re-implementing it all, just + # switch out the __class__ to get our `wrap_socket` implementation + context = _original_create_default_context(*a, **kw) + context.__class__ = GreenSSLContext + return context + + create_default_context = green_create_default_context + _create_default_https_context = green_create_default_context diff --git a/eventlet/green/thread.py b/eventlet/green/thread.py index e26f6b37de..7bcb563570 100644 --- a/eventlet/green/thread.py +++ b/eventlet/green/thread.py @@ -113,3 +113,6 @@ def stack_size(size=None): # this thread will suffer from eventlet.corolocal import local as _local + +if hasattr(__thread, 'daemon_threads_allowed'): + daemon_threads_allowed = __thread.daemon_threads_allowed diff --git a/setup.py b/setup.py index 9ea30017f4..b68bc25c00 100644 --- a/setup.py +++ b/setup.py @@ -43,6 +43,7 @@ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Programming Language :: Python", "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules", diff --git a/tests/__init__.py b/tests/__init__.py index 6a6512aca7..1f3a2136ab 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -357,8 +357,9 @@ def run_python(path, env=None, args=None, timeout=None, pythonpath_extend=None, if len(parts) > 1: skip_args.append(parts[1]) raise SkipTest(*skip_args) - ok = output.rstrip() == b'pass' - if not ok: + lines = output.splitlines() + ok = 
lines[-1].rstrip() == b'pass' + if not ok or len(lines) > 1: sys.stderr.write('Program {0} output:\n---\n{1}\n---\n'.format(path, output.decode())) assert ok, 'Expected single line "pass" in stdout' diff --git a/tests/dagpool_test.py b/tests/dagpool_test.py index 0b5a745c60..0b4880dbfe 100644 --- a/tests/dagpool_test.py +++ b/tests/dagpool_test.py @@ -198,14 +198,14 @@ def test_init(): with check_no_suspend(): results = pool.waitall() # with no spawn() or post(), waitall() returns preload data - assert_equals(results, dict(a=1, b=2, c=3)) + assert_equal(results, dict(a=1, b=2, c=3)) # preload sequence of pairs pool = DAGPool([("d", 4), ("e", 5), ("f", 6)]) # this must not hang with check_no_suspend(): results = pool.waitall() - assert_equals(results, dict(d=4, e=5, f=6)) + assert_equal(results, dict(d=4, e=5, f=6)) def test_wait_each_empty(): @@ -223,10 +223,10 @@ def test_wait_each_preload(): with check_no_suspend(): # wait_each() may deliver in arbitrary order; collect into a dict # for comparison - assert_equals(dict(pool.wait_each("abc")), dict(a=1, b=2, c=3)) + assert_equal(dict(pool.wait_each("abc")), dict(a=1, b=2, c=3)) # while we're at it, test wait() for preloaded keys - assert_equals(pool.wait("bc"), dict(b=2, c=3)) + assert_equal(pool.wait("bc"), dict(b=2, c=3)) def post_each(pool, capture): @@ -264,10 +264,10 @@ def test_wait_posted(): eventlet.spawn(post_each, pool, capture) gotten = pool.wait("bcdefg") capture.add("got all") - assert_equals(gotten, - dict(b=2, c=3, - d="dval", e="eval", - f="fval", g="gval")) + assert_equal(gotten, + dict(b=2, c=3, + d="dval", e="eval", + f="fval", g="gval")) capture.validate([ [], [], @@ -292,7 +292,7 @@ def test_spawn_collision_spawn(): pool = DAGPool() pool.spawn("a", (), lambda key, results: "aval") # hasn't yet even started - assert_equals(pool.get("a"), None) + assert_equal(pool.get("a"), None) with assert_raises(Collision): # Attempting to spawn again with same key should collide even if the # first spawned 
greenthread hasn't yet had a chance to run. @@ -300,7 +300,7 @@ def test_spawn_collision_spawn(): # now let the spawned eventlet run eventlet.sleep(0) # should have finished - assert_equals(pool.get("a"), "aval") + assert_equal(pool.get("a"), "aval") with assert_raises(Collision): # Attempting to spawn with same key collides even when the greenthread # has completed. @@ -331,61 +331,60 @@ def test_spawn_multiple(): capture.step() # but none of them has yet produced a result for k in "defgh": - assert_equals(pool.get(k), None) - assert_equals(set(pool.keys()), set("abc")) - assert_equals(dict(pool.items()), dict(a=1, b=2, c=3)) - assert_equals(pool.running(), 5) - assert_equals(set(pool.running_keys()), set("defgh")) - assert_equals(pool.waiting(), 1) - assert_equals(pool.waiting_for(), dict(h=set("defg"))) - assert_equals(pool.waiting_for("d"), set()) - assert_equals(pool.waiting_for("c"), set()) + assert_equal(pool.get(k), None) + assert_equal(set(pool.keys()), set("abc")) + assert_equal(dict(pool.items()), dict(a=1, b=2, c=3)) + assert_equal(pool.running(), 5) + assert_equal(set(pool.running_keys()), set("defgh")) + assert_equal(pool.waiting(), 1) + assert_equal(pool.waiting_for(), dict(h=set("defg"))) + assert_equal(pool.waiting_for("d"), set()) + assert_equal(pool.waiting_for("c"), set()) with assert_raises(KeyError): pool.waiting_for("j") - assert_equals(pool.waiting_for("h"), set("defg")) + assert_equal(pool.waiting_for("h"), set("defg")) # let one of the upstream greenthreads complete events["f"].send("fval") spin() capture.step() - assert_equals(pool.get("f"), "fval") - assert_equals(set(pool.keys()), set("abcf")) - assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, f="fval")) - assert_equals(pool.running(), 4) - assert_equals(set(pool.running_keys()), set("degh")) - assert_equals(pool.waiting(), 1) - assert_equals(pool.waiting_for("h"), set("deg")) + assert_equal(pool.get("f"), "fval") + assert_equal(set(pool.keys()), set("abcf")) + 
assert_equal(dict(pool.items()), dict(a=1, b=2, c=3, f="fval")) + assert_equal(pool.running(), 4) + assert_equal(set(pool.running_keys()), set("degh")) + assert_equal(pool.waiting(), 1) + assert_equal(pool.waiting_for("h"), set("deg")) # now two others events["e"].send("eval") events["g"].send("gval") spin() capture.step() - assert_equals(pool.get("e"), "eval") - assert_equals(pool.get("g"), "gval") - assert_equals(set(pool.keys()), set("abcefg")) - assert_equals(dict(pool.items()), - dict(a=1, b=2, c=3, e="eval", f="fval", g="gval")) - assert_equals(pool.running(), 2) - assert_equals(set(pool.running_keys()), set("dh")) - assert_equals(pool.waiting(), 1) - assert_equals(pool.waiting_for("h"), set("d")) + assert_equal(pool.get("e"), "eval") + assert_equal(pool.get("g"), "gval") + assert_equal(set(pool.keys()), set("abcefg")) + assert_equal(dict(pool.items()), + dict(a=1, b=2, c=3, e="eval", f="fval", g="gval")) + assert_equal(pool.running(), 2) + assert_equal(set(pool.running_keys()), set("dh")) + assert_equal(pool.waiting(), 1) + assert_equal(pool.waiting_for("h"), set("d")) # last one events["d"].send("dval") # make sure both pool greenthreads get a chance to run spin() capture.step() - assert_equals(pool.get("d"), "dval") - assert_equals(set(pool.keys()), set("abcdefgh")) - assert_equals(dict(pool.items()), - dict(a=1, b=2, c=3, - d="dval", e="eval", f="fval", g="gval", h="hval")) - assert_equals(pool.running(), 0) + assert_equal(pool.get("d"), "dval") + assert_equal(set(pool.keys()), set("abcdefgh")) + assert_equal(dict(pool.items()), + dict(a=1, b=2, c=3, + d="dval", e="eval", f="fval", g="gval", h="hval")) + assert_equal(pool.running(), 0) assert not pool.running_keys() - assert_equals(pool.waiting(), 0) - assert_equals(pool.waiting_for("h"), set()) - + assert_equal(pool.waiting(), 0) + assert_equal(pool.waiting_for("h"), set()) capture.validate([ ["h got b", "h got c"], ["f returning fval", "h got f"], @@ -439,19 +438,19 @@ def test_spawn_many(): spin() # 
verify that e completed (also that post(key) within greenthread # overrides implicit post of return value, which would be None) - assert_equals(pool.get("e"), "e") + assert_equal(pool.get("e"), "e") # With the dependency graph shown above, it is not guaranteed whether b or # c will complete first. Handle either case. sequence = capture.sequence[:] sequence[1:3] = [set([sequence[1].pop(), sequence[2].pop()])] - assert_equals(sequence, - [set(["a done"]), - set(["b done", "c done"]), - set(["d done"]), - set(["e done"]), - set(["waitall() done"]), - ]) + assert_equal(sequence, + [set(["a done"]), + set(["b done", "c done"]), + set(["d done"]), + set(["e done"]), + set(["waitall() done"]), + ]) # deliberately distinguish this from dagpool._MISSING @@ -473,7 +472,7 @@ def test_wait_each_all(): for pos in range(len(keys)): # next value from wait_each() k, v = next(each) - assert_equals(k, keys[pos]) + assert_equal(k, keys[pos]) # advance every pool greenlet as far as it can go spin() # everything from keys[:pos+1] should have a value by now @@ -501,7 +500,7 @@ def test_kill(): pool.kill("a") # didn't run spin() - assert_equals(pool.get("a"), None) + assert_equal(pool.get("a"), None) # killing it forgets about it with assert_raises(KeyError): pool.kill("a") @@ -512,7 +511,7 @@ def test_kill(): with assert_raises(KeyError): pool.kill("a") # verify it ran to completion - assert_equals(pool.get("a"), 2) + assert_equal(pool.get("a"), 2) def test_post_collision_preload(): @@ -540,7 +539,7 @@ def test_post_collision_spawn(): pool.kill("a") # now we can post pool.post("a", 3) - assert_equals(pool.get("a"), 3) + assert_equal(pool.get("a"), 3) pool = DAGPool() pool.spawn("a", (), lambda key, result: 4) @@ -560,10 +559,10 @@ def test_post_replace(): pool = DAGPool() pool.post("a", 1) pool.post("a", 2, replace=True) - assert_equals(pool.get("a"), 2) - assert_equals(dict(pool.wait_each("a")), dict(a=2)) - assert_equals(pool.wait("a"), dict(a=2)) - assert_equals(pool["a"], 2) + 
assert_equal(pool.get("a"), 2) + assert_equal(dict(pool.wait_each("a")), dict(a=2)) + assert_equal(pool.wait("a"), dict(a=2)) + assert_equal(pool["a"], 2) def waitfor(capture, pool, key): @@ -605,10 +604,10 @@ def test_waitall_exc(): try: pool.waitall() except PropagateError as err: - assert_equals(err.key, "a") + assert_equal(err.key, "a") assert isinstance(err.exc, BogusError), \ "exc attribute is {0}, not BogusError".format(err.exc) - assert_equals(str(err.exc), "bogus") + assert_equal(str(err.exc), "bogus") msg = str(err) assert_in("PropagateError(a)", msg) assert_in("BogusError", msg) @@ -623,14 +622,14 @@ def test_propagate_exc(): try: pool["c"] except PropagateError as errc: - assert_equals(errc.key, "c") + assert_equal(errc.key, "c") errb = errc.exc - assert_equals(errb.key, "b") + assert_equal(errb.key, "b") erra = errb.exc - assert_equals(erra.key, "a") + assert_equal(erra.key, "a") assert isinstance(erra.exc, BogusError), \ "exc attribute is {0}, not BogusError".format(erra.exc) - assert_equals(str(erra.exc), "bogus") + assert_equal(str(erra.exc), "bogus") msg = str(errc) assert_in("PropagateError(a)", msg) assert_in("PropagateError(b)", msg) @@ -688,13 +687,13 @@ def test_post_get_exc(): pass # wait_each_success() filters - assert_equals(dict(pool.wait_each_success()), dict(a=bogua)) - assert_equals(dict(pool.wait_each_success("ab")), dict(a=bogua)) - assert_equals(dict(pool.wait_each_success("a")), dict(a=bogua)) - assert_equals(dict(pool.wait_each_success("b")), {}) + assert_equal(dict(pool.wait_each_success()), dict(a=bogua)) + assert_equal(dict(pool.wait_each_success("ab")), dict(a=bogua)) + assert_equal(dict(pool.wait_each_success("a")), dict(a=bogua)) + assert_equal(dict(pool.wait_each_success("b")), {}) # wait_each_exception() filters the other way - assert_equals(dict(pool.wait_each_exception()), dict(b=bogub)) - assert_equals(dict(pool.wait_each_exception("ab")), dict(b=bogub)) - assert_equals(dict(pool.wait_each_exception("a")), {}) - 
assert_equals(dict(pool.wait_each_exception("b")), dict(b=bogub)) + assert_equal(dict(pool.wait_each_exception()), dict(b=bogub)) + assert_equal(dict(pool.wait_each_exception("ab")), dict(b=bogub)) + assert_equal(dict(pool.wait_each_exception("a")), {}) + assert_equal(dict(pool.wait_each_exception("b")), dict(b=bogub)) diff --git a/tests/tpool_test.py b/tests/tpool_test.py index d66d86787f..1d8fa6b396 100644 --- a/tests/tpool_test.py +++ b/tests/tpool_test.py @@ -287,11 +287,9 @@ class TpoolLongTests(tests.LimitedTestCase): TEST_TIMEOUT = 60 def test_a_buncha_stuff(self): - assert_ = self.assert_ - class Dummy(object): def foo(self, when, token=None): - assert_(token is not None) + assert token is not None time.sleep(random.random() / 200.0) return token @@ -330,9 +328,7 @@ def test_leakage_from_tracebacks(self): first_created = middle_objs - initial_objs gc.collect() second_created = len(gc.get_objects()) - middle_objs - self.assert_(second_created - first_created < 10, - "first loop: %s, second loop: %s" % (first_created, - second_created)) + assert second_created - first_created < 10, "first loop: {}, second loop: {}".format(first_created, second_created) tpool.killall() diff --git a/tox.ini b/tox.ini index 7832199ec4..b4e52759b6 100644 --- a/tox.ini +++ b/tox.ini @@ -21,7 +21,7 @@ envlist = py38-openssl py39-dnspython1 pypy3-epolls - py{38,39,310}-{selects,poll,epolls} + py{38,39,310,311,312}-{selects,poll,epolls} skipsdist = True [testenv:ipv6] From 39bf321d926665f0bbbafd1400c1d0aacc23ae1d Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 15 Dec 2023 11:56:52 -0800 Subject: [PATCH 10/35] Stop claiming to create universal wheels (#841) From https://wheel.readthedocs.io/en/stable/user_guide.html : > If your project contains no C extensions and is expected to work on > both Python 2 and 3, you will want to tell wheel to produce universal > wheels by adding this to your setup.cfg file: > > [bdist_wheel] > universal = 1 Now that we no longer support Python 2, 
it's inappropriate for us to claim that our wheels are universal. Note that claiming that a wheel is universal when it's not has caused trouble for other projects in the past; see https://github.com/PyCQA/bandit/issues/663 --- setup.cfg | 3 --- 1 file changed, 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 26c955ab38..ddb7da9ed0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,2 @@ [metadata] description_file = README.rst - -[wheel] -universal = True From 33e05cae342418333dd7ed649d21b77621895585 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Mon, 18 Dec 2023 16:19:58 +0100 Subject: [PATCH 11/35] Automatize and modernize deployments (#845) These changes aim to avoid unnecessary workload by automatizing major parts of the release process. With these changes, deployments would be automatic. Pypi will accept new releases coming from eventlet/eventlet by adding a trusted publisher based on OIDC: - https://docs.pypi.org/trusted-publishers/ - https://en.wikipedia.org/wiki/OpenID#OpenID_Connect_(OIDC) These changes propose: - Manage version number by using git tags rather than manual versioning - Modernize packaging by using pyproject.toml rather than setup.py - Remove old artifacts related to publishing (`bin/release`) - Adding a github workflow dedicated to pypi publish - Added OIDC trusted publisher management https://pypi.org/manage/project/eventlet/settings/publishing/ These changes updated the list of authors to reflect current active core maintainers and allow community members to contact the team. 
Related to recent discussions: - https://github.com/eventlet/eventlet/issues/843 - https://github.com/eventlet/eventlet/issues/842 --- .github/workflows/publish.yml | 38 +++++++++ bin/release | 153 ---------------------------------- eventlet/__init__.py | 108 ++++++++++++------------ pyproject.toml | 61 ++++++++++++++ setup.py | 49 +---------- 5 files changed, 151 insertions(+), 258 deletions(-) create mode 100644 .github/workflows/publish.yml delete mode 100755 bin/release create mode 100644 pyproject.toml diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000000..25c67b10d8 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,38 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build + - name: Build package + run: python -m build + - name: Publish package + # deploy only when a new tag is pushed to github + if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/bin/release b/bin/release deleted file mode 100755 index 9a78c3cbf2..0000000000 --- a/bin/release +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -set -e -cd "$( dirname "${BASH_SOURCE[0]}" )/.." -if [[ ! 
-d ./venv-release ]]; then - virtualenv ./venv-release - echo '*' >./venv-release/.gitignore - ./venv-release/bin/pip install -U pip setuptools sphinx twine wheel -fi -source $PWD/venv-release/bin/activate -pip install -e $PWD - -version= -version_next= - -main() { - branch="${1-$(git symbolic-ref --short HEAD)}" - version="$(EVENTLET_IMPORT_VERSION_ONLY=1 python -c 'import eventlet; print(eventlet.__version__)')" - printf "\nbranch: %s eventlet.__version__: '%s'\n" $branch $version >&2 - if [[ "$branch" != "master" ]]; then - echo "Must be on master" >&2 - exit 1 - fi - if [[ -n "$(git status --short -uall)" ]]; then - echo "Tree must be clean. git status:" >&2 - echo "" >&2 - git status --short -uall - echo "" >&2 - exit 1 - fi - last_commit_message=$(git show --format="%s" --no-patch HEAD) - expect_commit_message="v$version release" - if [[ "$last_commit_message" != "$expect_commit_message" ]]; then - printf "Last commit message: '%s' expected: '%s'\n" "$last_commit_message" "$expect_commit_message" >&2 - if confirm "Create release commit? [yN] "; then - create_commit - elif ! confirm "Continue without proper release commit? [yN] "; then - exit 1 - fi - fi - confirm "Continue? [yN] " || exit 1 - - echo "Creating tag v$version" >&2 - if ! git tag "v$version"; then - echo "git tag failed " >&2 - confirm "Continue still? [yN] " || exit 1 - fi - - if confirm "Build documentation (website)? [Yn] "; then - bin/build-website.bash || exit 1 - git checkout "$branch" - fi - - if confirm "Upload to PyPi? [Yn] "; then - rm -rf build dist - python setup.py sdist bdist_wheel || exit 1 - twine upload dist/* || exit 1 - fi - - git push --verbose origin master gh-pages || exit 1 - git push --tags -} - -create_commit() { - echo "" >&2 - echo "Plan:" >&2 - echo "1. bump version" >&2 - echo "2. update NEWS, AUTHORS" >&2 - echo "3. commit" >&2 - echo "4. run bin/release again" >&2 - echo "" >&2 - - bump_version - edit_news - - git diff - confirm "Ready to commit? 
[Yn] " || exit 1 - git commit -a -m "v$version_next release" - - echo "Re-exec $0 to continue" >&2 - exec $0 -} - -bump_version() { - local current=$version - echo "Current version: '$current'" >&2 - echo -n "Enter next version (empty to abort): " >&2 - read version_next - if [[ -z "$version_next" ]]; then - exit 1 - fi - echo "Next version: '$version_next'" >&2 - - local current_tuple="${current//./, }" - local next_tuple="${version_next//./, }" - local version_path="eventlet/__init__.py" - echo "Updating file '$version_path'" >&2 - if ! sed --in-place='' -e "s/($current_tuple)/($next_tuple)/" "$version_path"; then - echo "sed error $?" >&2 - exit 1 - fi - if git diff --exit-code "$version_path"; then - echo "File '$version_path' is not modified" >&2 - exit 1 - fi - echo "" >&2 - - confirm "Confirm changes? [yN] " || exit 1 -} - -edit_news() { - echo "Changes since last release:" >&2 - git log --format='%h %an %s' "v$version"^.. -- || exit 1 - echo "" >&2 - - local editor=$(which edit 2>/dev/null) - [[ -z "$editor" ]] && editor="$EDITOR" - if [[ -n "$editor" ]]; then - if confirm "Open default editor for NEWS and AUTHORS? [Yn] "; then - $editor NEWS - $editor AUTHORS - else - confirm "Change files NEWS and AUTHORS manually and press any key" - fi - else - echo "Unable to determine default text editor." >&2 - confirm "Change files NEWS and AUTHORS manually and press any key" - fi - echo "" >&2 - - if git diff --exit-code NEWS AUTHORS; then - echo "Files NEWS and AUTHORS are not modified" >&2 - exit 1 - fi - echo "" >&2 - - confirm "Confirm changes? 
[yN] " || exit 1 -} - -confirm() { - local reply - local prompt="$1" - read -n1 -p "$prompt" reply >&2 - echo "" >&2 - rc=0 - local default_y=" \[Yn\] $" - if [[ -z "$reply" ]] && [[ "$prompt" =~ $default_y ]]; then - reply="y" - fi - [[ "$reply" != "y" ]] && rc=1 - return $rc -} - -main "$@" diff --git a/eventlet/__init__.py b/eventlet/__init__.py index 86a8384e18..16d32cbd8d 100644 --- a/eventlet/__init__.py +++ b/eventlet/__init__.py @@ -8,71 +8,65 @@ DeprecationWarning, ) -version_info = (0, 33, 3) -__version__ = '.'.join(map(str, version_info)) -# This is to make Debian packaging easier, it ignores import -# errors of greenlet so that the packager can still at least -# access the version. Also this makes easy_install a little quieter -if os.environ.get('EVENTLET_IMPORT_VERSION_ONLY') != '1': - from eventlet import convenience - from eventlet import event - from eventlet import greenpool - from eventlet import greenthread - from eventlet import patcher - from eventlet import queue - from eventlet import semaphore - from eventlet import support - from eventlet import timeout - import greenlet - # Force monotonic library search as early as possible. - # Helpful when CPython < 3.5 on Linux blocked in `os.waitpid(-1)` before first use of hub. - # Example: gunicorn - # https://github.com/eventlet/eventlet/issues/401#issuecomment-327500352 - try: - import monotonic - del monotonic - except ImportError: - pass +from eventlet import convenience +from eventlet import event +from eventlet import greenpool +from eventlet import greenthread +from eventlet import patcher +from eventlet import queue +from eventlet import semaphore +from eventlet import support +from eventlet import timeout +import greenlet +# Force monotonic library search as early as possible. +# Helpful when CPython < 3.5 on Linux blocked in `os.waitpid(-1)` before first use of hub. 
+# Example: gunicorn +# https://github.com/eventlet/eventlet/issues/401#issuecomment-327500352 +try: + import monotonic + del monotonic +except ImportError: + pass - connect = convenience.connect - listen = convenience.listen - serve = convenience.serve - StopServe = convenience.StopServe - wrap_ssl = convenience.wrap_ssl +connect = convenience.connect +listen = convenience.listen +serve = convenience.serve +StopServe = convenience.StopServe +wrap_ssl = convenience.wrap_ssl - Event = event.Event +Event = event.Event - GreenPool = greenpool.GreenPool - GreenPile = greenpool.GreenPile +GreenPool = greenpool.GreenPool +GreenPile = greenpool.GreenPile - sleep = greenthread.sleep - spawn = greenthread.spawn - spawn_n = greenthread.spawn_n - spawn_after = greenthread.spawn_after - kill = greenthread.kill +sleep = greenthread.sleep +spawn = greenthread.spawn +spawn_n = greenthread.spawn_n +spawn_after = greenthread.spawn_after +kill = greenthread.kill - import_patched = patcher.import_patched - monkey_patch = patcher.monkey_patch +import_patched = patcher.import_patched +monkey_patch = patcher.monkey_patch - Queue = queue.Queue +Queue = queue.Queue - Semaphore = semaphore.Semaphore - CappedSemaphore = semaphore.CappedSemaphore - BoundedSemaphore = semaphore.BoundedSemaphore +Semaphore = semaphore.Semaphore +CappedSemaphore = semaphore.CappedSemaphore +BoundedSemaphore = semaphore.BoundedSemaphore - Timeout = timeout.Timeout - with_timeout = timeout.with_timeout - wrap_is_timeout = timeout.wrap_is_timeout - is_timeout = timeout.is_timeout +Timeout = timeout.Timeout +with_timeout = timeout.with_timeout +wrap_is_timeout = timeout.wrap_is_timeout +is_timeout = timeout.is_timeout - getcurrent = greenlet.greenlet.getcurrent +getcurrent = greenlet.greenlet.getcurrent - # deprecated - TimeoutError, exc_after, call_after_global = ( - support.wrap_deprecated(old, new)(fun) for old, new, fun in ( - ('TimeoutError', 'Timeout', Timeout), - ('exc_after', 'greenthread.exc_after', 
greenthread.exc_after), - ('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global), - )) +# deprecated +TimeoutError, exc_after, call_after_global = ( + support.wrap_deprecated(old, new)(fun) for old, new, fun in ( + ('TimeoutError', 'Timeout', Timeout), + ('exc_after', 'greenthread.exc_after', greenthread.exc_after), + ('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global), + )) -del os +os diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..9cda45a1d7 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,61 @@ +[build-system] +requires = [ + "hatch-vcs>=0.3", + "hatchling>=1.12.2", +] +build-backend = "hatchling.build" + +[project] +name = "eventlet" +authors = [ + {name = "Sergey Shepelev", email = "temotor@gmail.com"}, + {name = "Jakub Stasiak", email = "jakub@stasiak.at"}, + {name = "Tim Burke", email = "tim.burke@gmail.com"}, + {name = "Nat Goodspeed", email = "nat@lindenlab.com"}, + {name = "Itamar Turner-Trauring", email = "itamar@itamarst.org"}, + {name = "HervƩ Beraud", email = "hberaud@redhat.com"}, +] +description = "Highly concurrent networking library" +readme = "README.rst" +requires-python = ">=3.8" +license = {text = "MIT"} +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python", + "Topic :: Internet", + "Topic :: Software Development :: Libraries :: Python Modules", +] +dynamic = ["version"] +dependencies = [ + 'dnspython >= 1.15.0', + 'greenlet >= 1.0', + 'monotonic >= 
1.4;python_version<"3.5"', + 'six >= 1.10.0', +] + +[project.urls] +Homepage = "https://github.com/eventlet/eventlet" +History = "https://github.com/eventlet/eventlet/blob/main/NEWS" +Tracker = "https://github.com/eventlet/eventlet/issues" +Source = "https://github.com/eventlet/eventlet" + +[project.optional-dependencies] +dev = ["black", "isort", "pip-tools", "build", "twine", "pre-commit", "commitizen"] + +[options.packages.find] +where = "evenetlet" +exclude = ["tests*", "benchmarks", "examples"] + +[tool.hatch] +version.source = "vcs" diff --git a/setup.py b/setup.py index b68bc25c00..61cc244bf7 100644 --- a/setup.py +++ b/setup.py @@ -1,51 +1,4 @@ #!/usr/bin/env python -import os import setuptools - -os.environ.setdefault('EVENTLET_IMPORT_VERSION_ONLY', '1') -import eventlet - -setuptools.setup( - name='eventlet', - version=eventlet.__version__, - description='Highly concurrent networking library', - author='Linden Lab', - author_email='eventletdev@lists.secondlife.com', - url='http://eventlet.net', - python_requires=">=3.8.0", - project_urls={ - 'Source': 'https://github.com/eventlet/eventlet', - }, - packages=setuptools.find_packages(exclude=['benchmarks', 'tests', 'tests.*']), - install_requires=( - 'dnspython >= 1.15.0', - 'greenlet >= 1.0', - 'monotonic >= 1.4;python_version<"3.5"', - 'six >= 1.10.0', - ), - zip_safe=False, - long_description=open( - os.path.join( - os.path.dirname(__file__), - 'README.rst' - ) - ).read(), - classifiers=[ - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "License :: OSI Approved :: MIT License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Programming Language :: 
Python", - "Topic :: Internet", - "Topic :: Software Development :: Libraries :: Python Modules", - ] -) +setuptools.setup() From ac50404d79a6c95720d10c3636a80ac318db76d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Tue, 19 Dec 2023 12:15:05 +0100 Subject: [PATCH 12/35] Fix Python 3.13 compat by adding missing attibute '_is_main_interpreter' (#847) https://github.com/python/cpython/issues/112826 Python 3.13 doesn't have workaround and so eventlet is broken with Python versions higher than 3.12. Fix #838 Fix #604 --- NEWS | 1 + eventlet/green/thread.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/NEWS b/NEWS index 328c4ba486..2c4e6321ed 100644 --- a/NEWS +++ b/NEWS @@ -2,6 +2,7 @@ Unreleased ========== * Dropped support for Python 3.7 and earlier. +* Fix Python 3.13 compat by adding missing attibute '_is_main_interpreter' https://github.com/eventlet/eventlet/pull/847 0.33.3 ====== diff --git a/eventlet/green/thread.py b/eventlet/green/thread.py index 7bcb563570..7b821317b8 100644 --- a/eventlet/green/thread.py +++ b/eventlet/green/thread.py @@ -15,6 +15,8 @@ LockType = Lock __threadcount = 0 +if hasattr(__thread, "_is_main_interpreter"): + _is_main_interpreter = __thread._is_main_interpreter if six.PY3: def _set_sentinel(): From 64410214d0856f989e3b7c5f0bbe256d2b191f2f Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Tue, 19 Dec 2023 06:39:24 -0500 Subject: [PATCH 13/35] Add back 3.7 support (#848) * Restore 3.7 support * Match documented behavior in stdlib * Pass tests on Python 3.7 --------- Co-authored-by: Itamar Turner-Trauring --- .github/workflows/test.yaml | 1 + README.rst | 2 +- eventlet/green/ssl.py | 15 ++++++++++++--- pyproject.toml | 2 +- tests/wsgi_test.py | 6 ++++++ tox.ini | 4 ++-- 6 files changed, 23 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index a3d6324aaf..056ca116c4 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ 
-29,6 +29,7 @@ jobs: fail-fast: false matrix: include: + - { py: 3.7, toxenv: py37-epolls, ignore-error: false, os: ubuntu-latest } - { py: 3.8, toxenv: py38-epolls, ignore-error: false, os: ubuntu-latest } - { py: 3.8, toxenv: py38-openssl, ignore-error: false, os: ubuntu-latest } - { py: 3.8, toxenv: py38-poll, ignore-error: false, os: ubuntu-latest } diff --git a/README.rst b/README.rst index 78d46087b0..e6b111c28a 100644 --- a/README.rst +++ b/README.rst @@ -65,7 +65,7 @@ Apologies for any inconvenience. Supported Python versions ========================= -Python 3.8-3.12 are currently supported. +Python 3.7-3.12 are currently supported. Flair ===== diff --git a/eventlet/green/ssl.py b/eventlet/green/ssl.py index 29cbecbc65..1a21b2f73a 100644 --- a/eventlet/green/ssl.py +++ b/eventlet/green/ssl.py @@ -24,6 +24,7 @@ _original_sslsocket = __ssl.SSLSocket _original_sslcontext = __ssl.SSLContext _is_under_py_3_7 = sys.version_info < (3, 7) +_is_py_3_7 = sys.version_info[:2] == (3, 7) _original_wrap_socket = __ssl.SSLContext.wrap_socket @@ -191,6 +192,11 @@ def _call_trampolining(self, func, *a, **kw): write=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) + elif _is_py_3_7 and "unexpected eof" in exc.args[1]: + # For reasons I don't understand on 3.7 we get [ssl: + # KRB5_S_TKT_NYV] unexpected eof while reading] + # errors... + raise IOClosed else: raise @@ -200,14 +206,17 @@ def write(self, data): return self._call_trampolining( super(GreenSSLSocket, self).write, data) - def read(self, *args, **kwargs): + def read(self, len=1024, buffer=None): """Read up to LEN bytes and return them. 
Return zero-length string on EOF.""" try: return self._call_trampolining( - super(GreenSSLSocket, self).read, *args, **kwargs) + super(GreenSSLSocket, self).read, len, buffer) except IOClosed: - return b'' + if buffer is None: + return b'' + else: + return 0 def send(self, data, flags=0): if self._sslobj: diff --git a/pyproject.toml b/pyproject.toml index 9cda45a1d7..71502f3214 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ authors = [ ] description = "Highly concurrent networking library" readme = "README.rst" -requires-python = ">=3.8" +requires-python = ">=3.7" license = {text = "MIT"} classifiers = [ "Development Status :: 4 - Beta", diff --git a/tests/wsgi_test.py b/tests/wsgi_test.py index 13a6e83c5d..df376ae6a1 100644 --- a/tests/wsgi_test.py +++ b/tests/wsgi_test.py @@ -568,6 +568,12 @@ def server(sock, site, log): except (ssl.SSLZeroReturnError, ssl.SSLEOFError): # Can't write a response to a closed TLS session return True + except OSError: + if sys.version_info[:2] == (3, 7): + return True + else: + traceback.print_exc() + return False except Exception: traceback.print_exc() return False diff --git a/tox.ini b/tox.ini index b4e52759b6..f53fbebb73 100644 --- a/tox.ini +++ b/tox.ini @@ -30,7 +30,7 @@ setenv = {[testenv]setenv} eventlet_test_ipv6 = 1 deps = - coverage==7.3.3 + coverage pytest pytest-cov usedevelop = True @@ -60,7 +60,7 @@ setenv = epolls: EVENTLET_HUB = epolls tox_cover_args = --cov=eventlet deps = - coverage==7.3.3 + coverage pytest pytest-cov py38-openssl: pyopenssl==22.1.0 From e06bff1792320ab483ebaf039be25319d3824175 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Tue, 19 Dec 2023 15:46:24 +0100 Subject: [PATCH 14/35] Update changelog for version 0.34.0 (#849) --- NEWS | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/NEWS b/NEWS index 2c4e6321ed..31f375109d 100644 --- a/NEWS +++ b/NEWS @@ -1,8 +1,13 @@ -Unreleased -========== +0.34.0 +====== -* Dropped support for 
Python 3.7 and earlier. +* Dropped support for Python 3.6 and earlier. * Fix Python 3.13 compat by adding missing attibute '_is_main_interpreter' https://github.com/eventlet/eventlet/pull/847 +* Add support of Python 3.12 https://github.com/eventlet/eventlet/pull/817 +* Drop unmaintained and unused stdlib tests https://github.com/eventlet/eventlet/pull/820 +* Fix tests and CI for Python 3.7 and higher https://github.com/eventlet/eventlet/pull/831 and https://github.com/eventlet/eventlet/pull/832 +* Stop claiming to create universal wheels https://github.com/eventlet/eventlet/pull/841 +* Fix green logging locks for Python versions <= 3.10 https://github.com/eventlet/eventlet/pull/754 0.33.3 ====== From 72a29b6721342860cf41a8fcd7039dffb9bdd5f2 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 19 Dec 2023 06:58:39 -0800 Subject: [PATCH 15/35] py<=3.10: Green logging locks (#754) * Fix issue comparing locks in some circumstances * Patch logging Rlocks on Python 3.9 and earlier, since gc.get_objects() doesn't include them --------- Co-authored-by: Sergey Shepelev Co-authored-by: Itamar Turner-Trauring Co-authored-by: Itamar Turner-Trauring --- eventlet/patcher.py | 19 ++++++++++++++- .../patcher_existing_logging_module_lock.py | 24 +++++++++++++++++++ tests/patcher_test.py | 4 ++++ 3 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 tests/isolated/patcher_existing_logging_module_lock.py diff --git a/eventlet/patcher.py b/eventlet/patcher.py index 0d1157afcd..32506ea7f0 100644 --- a/eventlet/patcher.py +++ b/eventlet/patcher.py @@ -413,6 +413,23 @@ def _green_existing_locks(): elif py3_style and not isinstance(obj, pyrlock_type): _fix_py3_rlock(obj, tid) + if sys.version_info < (3, 10): + # Older py3 won't have RLocks show up in gc.get_objects() -- see + # https://github.com/eventlet/eventlet/issues/546 -- so green a handful + # that we know are significant + import logging + if isinstance(logging._lock, rlock_type): + _fix_py3_rlock(logging._lock, tid) 
+ logging._acquireLock() + try: + for ref in logging._handlerList: + handler = ref() + if handler and isinstance(handler.lock, rlock_type): + _fix_py3_rlock(handler.lock, tid) + del handler + finally: + logging._releaseLock() + def _fix_py2_rlock(rlock, tid): import eventlet.green.threading @@ -465,7 +482,7 @@ def _fix_py3_rlock(old, tid): pass else: for k, v in ref_vars.items(): - if v == old: + if v is old: setattr(ref, k, new) diff --git a/tests/isolated/patcher_existing_logging_module_lock.py b/tests/isolated/patcher_existing_logging_module_lock.py new file mode 100644 index 0000000000..e8e4b6b220 --- /dev/null +++ b/tests/isolated/patcher_existing_logging_module_lock.py @@ -0,0 +1,24 @@ +# https://github.com/eventlet/eventlet/issues/730 +# https://github.com/eventlet/eventlet/pull/754 +__test__ = False + + +if __name__ == "__main__": + import logging + import threading + + handler = logging.Handler() + logger = logging.Logger("test") + logger.addHandler(handler) + + # Initially these are standard Python RLocks: + assert not isinstance(logging._lock, threading._PyRLock) + assert not isinstance(handler.lock, threading._PyRLock) + + # After patching, they get converted: + import eventlet.patcher + eventlet.patcher.monkey_patch(thread=True) + assert isinstance(logging._lock, threading._PyRLock) + assert isinstance(handler.lock, threading._PyRLock) + + print("pass") diff --git a/tests/patcher_test.py b/tests/patcher_test.py index de37fe89e9..4993889e22 100644 --- a/tests/patcher_test.py +++ b/tests/patcher_test.py @@ -482,6 +482,10 @@ def test_patcher_existing_locks_unlocked(): tests.run_isolated('patcher_existing_locks_unlocked.py') +def test_patcher_existing_logging_module_lock(): + tests.run_isolated('patcher_existing_logging_module_lock.py') + + def test_importlib_lock(): tests.run_isolated('patcher_importlib_lock.py') From 4da815f51ba7afb1b476b8c0f539da1c8ba3e15a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Tue, 19 Dec 2023 16:40:20 
+0100 Subject: [PATCH 16/35] Remove Python version < 3.7 from doc (#851) Their supports have been removed. --- doc/index.rst | 2 +- doc/real_index.html | 2 +- doc/ssl.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/index.rst b/doc/index.rst index fcbc46e399..b3f9cd794f 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -24,7 +24,7 @@ Code talks! This is a simple web crawler that fetches a bunch of urls concurren Supported Python versions ========================= -Currently CPython 2.7 and 3.4+ are supported, but **2.7 and 3.4 support is deprecated and will be removed in the future**, only CPython 3.5+ support will remain. +Currently supporting CPython 3.7+. Contents diff --git a/doc/real_index.html b/doc/real_index.html index da021cbc84..4083d8f56d 100644 --- a/doc/real_index.html +++ b/doc/real_index.html @@ -39,7 +39,7 @@

Eventlet

License: MIT

-

Currently CPython 2.7 and 3.4+ are supported, but 2.7 and 3.4 support is deprecated and will be removed in the future, only CPython 3.5+ support will remain.

+

Currently supporting CPython 3.7+.

API Documentation

diff --git a/doc/ssl.rst b/doc/ssl.rst index 3596725afa..46d013a402 100644 --- a/doc/ssl.rst +++ b/doc/ssl.rst @@ -1,7 +1,7 @@ Using SSL With Eventlet ======================== -Eventlet makes it easy to use non-blocking SSL sockets. If you're using Python 2.7 or later, you're all set, eventlet wraps the built-in ssl module. +Eventlet makes it easy to use non-blocking SSL sockets. If you're using Python 3.7 or later, you're all set, eventlet wraps the built-in ssl module. In either case, the ``green`` modules handle SSL sockets transparently, just like their standard counterparts. As an example, :mod:`eventlet.green.urllib2` can be used to fetch https urls in as non-blocking a fashion as you please:: From 53502e5ad1de5e2bc1f3699cec992555f3ff5a3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Wed, 20 Dec 2023 16:11:16 +0100 Subject: [PATCH 17/35] rename github workflow to fix ignored action (#852) Also modify action to react on push and pull-requests. Can be useful to try to build the package at each pull-request. --- .github/workflows/{publish.yml => publish.yaml} | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) rename .github/workflows/{publish.yml => publish.yaml} (93%) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yaml similarity index 93% rename from .github/workflows/publish.yml rename to .github/workflows/publish.yaml index 25c67b10d8..f3ec57547a 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yaml @@ -6,17 +6,15 @@ # separate terms of service, privacy policy, and support # documentation. 
-name: Upload Python Package +name: Build & Optional Deploy -on: - release: - types: [published] +on: [push, pull_request] permissions: contents: read jobs: - deploy: + build-and-deploy: runs-on: ubuntu-latest From c5f4684e5a31ea27f941c1f28cc257267ca45326 Mon Sep 17 00:00:00 2001 From: victor <16359131+jiajunsu@users.noreply.github.com> Date: Thu, 21 Dec 2023 00:56:01 +0800 Subject: [PATCH 18/35] Fix memory leak in module greendns (alternative to #682) (#811) * Fix memory leak in module greendns Change `EAI_*_ERROR` to `lambda` expression, and initialize them when being called, to avoid memory leak with `__traceback__.tb_next` fixes https://github.com/eventlet/eventlet/issues/810 --- eventlet/support/greendns.py | 29 +++++++++++++++++------------ tests/greendns_test.py | 14 ++++++++++++++ 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/eventlet/support/greendns.py b/eventlet/support/greendns.py index f30c551c64..5ffbb1fa02 100644 --- a/eventlet/support/greendns.py +++ b/eventlet/support/greendns.py @@ -85,6 +85,7 @@ def import_patched(module_name): DNS_QUERY_TIMEOUT = 10.0 HOSTS_TTL = 10.0 +# NOTE(victor): do not use EAI_*_ERROR instances for raising errors in python3, which will cause a memory leak. 
EAI_EAGAIN_ERROR = socket.gaierror(socket.EAI_AGAIN, 'Lookup timed out') EAI_NONAME_ERROR = socket.gaierror(socket.EAI_NONAME, 'Name or service not known') # EAI_NODATA was removed from RFC3493, it's now replaced with EAI_NONAME @@ -96,6 +97,10 @@ def import_patched(module_name): EAI_NODATA_ERROR = socket.gaierror(socket.EAI_NODATA, 'No address associated with hostname') +def _raise_new_error(error_instance): + raise error_instance.__class__(*error_instance.args) + + def is_ipv4_addr(host): """Return True if host is a valid IPv4 address""" if not isinstance(host, six.string_types): @@ -464,9 +469,9 @@ def resolve(name, family=socket.AF_INET, raises=True, _proxy=None, rdtype, dns.rdataclass.IN, None, False) raise except dns.exception.Timeout: - raise EAI_EAGAIN_ERROR + _raise_new_error(EAI_EAGAIN_ERROR) except dns.exception.DNSException: - raise EAI_NODATA_ERROR + _raise_new_error(EAI_NODATA_ERROR) def resolve_cname(host): @@ -476,9 +481,9 @@ def resolve_cname(host): except dns.resolver.NoAnswer: return host except dns.exception.Timeout: - raise EAI_EAGAIN_ERROR + _raise_new_error(EAI_EAGAIN_ERROR) except dns.exception.DNSException: - raise EAI_NODATA_ERROR + _raise_new_error(EAI_NODATA_ERROR) else: return str(ans[0].target) @@ -493,9 +498,9 @@ def getaliases(host): try: return resolver.getaliases(host) except dns.exception.Timeout: - raise EAI_EAGAIN_ERROR + _raise_new_error(EAI_EAGAIN_ERROR) except dns.exception.DNSException: - raise EAI_NODATA_ERROR + _raise_new_error(EAI_NODATA_ERROR) def _getaddrinfo_lookup(host, family, flags): @@ -504,7 +509,7 @@ def _getaddrinfo_lookup(host, family, flags): Helper function for getaddrinfo. 
""" if flags & socket.AI_NUMERICHOST: - raise EAI_NONAME_ERROR + _raise_new_error(EAI_NONAME_ERROR) addrs = [] if family == socket.AF_UNSPEC: err = None @@ -615,11 +620,11 @@ def getnameinfo(sockaddr, flags): raise TypeError('getnameinfo() argument 1 must be a tuple') else: # must be ipv6 sockaddr, pretending we don't know how to resolve it - raise EAI_NONAME_ERROR + _raise_new_error(EAI_NONAME_ERROR) if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST): # Conflicting flags. Punt. - raise EAI_NONAME_ERROR + _raise_new_error(EAI_NONAME_ERROR) if is_ipv4_addr(host): try: @@ -630,10 +635,10 @@ def getnameinfo(sockaddr, flags): host = rrset[0].target.to_text(omit_final_dot=True) except dns.exception.Timeout: if flags & socket.NI_NAMEREQD: - raise EAI_EAGAIN_ERROR + _raise_new_error(EAI_EAGAIN_ERROR) except dns.exception.DNSException: if flags & socket.NI_NAMEREQD: - raise EAI_NONAME_ERROR + _raise_new_error(EAI_NONAME_ERROR) else: try: rrset = resolver.query(host) @@ -642,7 +647,7 @@ def getnameinfo(sockaddr, flags): if flags & socket.NI_NUMERICHOST: host = rrset[0].address except dns.exception.Timeout: - raise EAI_EAGAIN_ERROR + _raise_new_error(EAI_EAGAIN_ERROR) except dns.exception.DNSException: raise socket.gaierror( (socket.EAI_NODATA, 'No address associated with hostname')) diff --git a/tests/greendns_test.py b/tests/greendns_test.py index 51305ab04c..801fcb9589 100644 --- a/tests/greendns_test.py +++ b/tests/greendns_test.py @@ -931,6 +931,20 @@ def test_noraise_dns_tcp(self): self.assertEqual(list(response.rrset.items)[0].address, expected_ip) +class TestRaiseErrors(tests.LimitedTestCase): + + def test_raise_new_error(self): + # https://github.com/eventlet/eventlet/issues/810 + # Raise exception multiple times + for _ in range(3): + with self.assertRaises(socket.gaierror) as error: + greendns._raise_new_error(greendns.EAI_EAGAIN_ERROR) + + self.assertIsNone(error.exception.__traceback__) + # Check no memory leak of exception instance + 
self.assertIsNone(greendns.EAI_EAGAIN_ERROR.__traceback__) + + def test_reverse_name(): tests.run_isolated('greendns_from_address_203.py') From 818f7cd4b58b0f3d6d87e097537cecd22e522302 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Wed, 20 Dec 2023 17:57:32 +0100 Subject: [PATCH 19/35] Fix OIDC authentication failure (#855) The publish job require writing permissions to store the `id-token` requested by Pypi. Without that change, we will get the following error: > Trusted publishing exchange failure: > OpenID Connect token retrieval failed: GitHub: missing or insufficient > OIDC token permissions, the ACTIONS_ID_TOKEN_REQUEST_TOKEN environment > variable was unset > This generally indicates a workflow configuration error, such as > insufficient permissions. Make sure that your workflow has `id-token: write` > configured at the job level, e.g.: Learn more at: https://docs.github.com/en/actions/deployment/security-hardening-your-deployments/about-security-hardening-with-openid-connect#adding-permissions-settings. 
--- .github/workflows/publish.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index f3ec57547a..096227514f 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -11,7 +11,7 @@ name: Build & Optional Deploy on: [push, pull_request] permissions: - contents: read + id-token: write jobs: build-and-deploy: From 7e9ca004afc3fba5459dd1629bd1b61d32468f59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Wed, 20 Dec 2023 17:59:33 +0100 Subject: [PATCH 20/35] Ignore asyncore and asynchat for Python 3.12+ (#854) Starting Python 3.12 these modules have been removed https://docs.python.org/3/whatsnew/3.12.html Fix #804 --- NEWS | 5 +++++ eventlet/green/asynchat.py | 21 ++++++++++++--------- eventlet/green/asyncore.py | 25 ++++++++++++++----------- 3 files changed, 31 insertions(+), 20 deletions(-) diff --git a/NEWS b/NEWS index 31f375109d..9f445765f0 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,8 @@ +Unreleased +========== + +* Ignore asyncore and asynchat for Python 3.12+ https://github.com/eventlet/eventlet/issues/804 + 0.34.0 ====== diff --git a/eventlet/green/asynchat.py b/eventlet/green/asynchat.py index e074749816..da51396adc 100644 --- a/eventlet/green/asynchat.py +++ b/eventlet/green/asynchat.py @@ -1,11 +1,14 @@ -from eventlet import patcher -from eventlet.green import asyncore -from eventlet.green import socket +import sys -patcher.inject( - 'asynchat', - globals(), - ('asyncore', asyncore), - ('socket', socket)) +if sys.version_info < (3, 12): + from eventlet import patcher + from eventlet.green import asyncore + from eventlet.green import socket -del patcher + patcher.inject( + 'asynchat', + globals(), + ('asyncore', asyncore), + ('socket', socket)) + + del patcher diff --git a/eventlet/green/asyncore.py b/eventlet/green/asyncore.py index 6a5f7976df..e7a7959b6a 100644 --- a/eventlet/green/asyncore.py +++ b/eventlet/green/asyncore.py @@ 
-1,13 +1,16 @@ -from eventlet import patcher -from eventlet.green import select -from eventlet.green import socket -from eventlet.green import time +import sys -patcher.inject( - "asyncore", - globals(), - ('select', select), - ('socket', socket), - ('time', time)) +if sys.version_info < (3, 12): + from eventlet import patcher + from eventlet.green import select + from eventlet.green import socket + from eventlet.green import time -del patcher + patcher.inject( + "asyncore", + globals(), + ('select', select), + ('socket', socket), + ('time', time)) + + del patcher From b738b0aed523528c8c7e1151aa9110fa8ffe6867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Wed, 20 Dec 2023 18:20:23 +0100 Subject: [PATCH 21/35] Update changelog (#856) --- NEWS | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/NEWS b/NEWS index 9f445765f0..7e649a65e6 100644 --- a/NEWS +++ b/NEWS @@ -1,10 +1,12 @@ -Unreleased -========== +0.34.1 +====== -* Ignore asyncore and asynchat for Python 3.12+ https://github.com/eventlet/eventlet/issues/804 +* [bug] Fix memory leak in greendns https://github.com/eventlet/eventlet/issues/810 +* [infra] Fix OIDC authentication failure https://github.com/eventlet/eventlet/pull/855 +* [bug] Ignore asyncore and asynchat for Python 3.12+ https://github.com/eventlet/eventlet/issues/804 -0.34.0 -====== +0.34.0 (Not released on Pypi) +============================= * Dropped support for Python 3.6 and earlier. 
* Fix Python 3.13 compat by adding missing attibute '_is_main_interpreter' https://github.com/eventlet/eventlet/pull/847 From be24605ad977a2f3f8923edb90651f2390ece233 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Thu, 21 Dec 2023 17:43:35 +0100 Subject: [PATCH 22/35] Fix pypi broken link (#857) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 71502f3214..b08d9ec61d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ dependencies = [ [project.urls] Homepage = "https://github.com/eventlet/eventlet" -History = "https://github.com/eventlet/eventlet/blob/main/NEWS" +History = "https://github.com/eventlet/eventlet/blob/master/NEWS" Tracker = "https://github.com/eventlet/eventlet/issues" Source = "https://github.com/eventlet/eventlet" From 43053745acd719a55e3ceb9f3e6af847e29118f6 Mon Sep 17 00:00:00 2001 From: Ruitong Zhu Date: Thu, 21 Dec 2023 13:19:55 -0800 Subject: [PATCH 23/35] Allowing inheritance of GreenSSLSocket without overriding the __new__ method (#796) Replace hard coded class name to avoid inconvenience when inheriting from this class. For a wrapper class GreenSSLSocketWrapper to inherit from GreenSSLSocket, it has to overwrite the __new__ method since __class__ was hard coded to be GreenSSLSocket. 
--- eventlet/green/ssl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eventlet/green/ssl.py b/eventlet/green/ssl.py index 1a21b2f73a..ebfeb2cb3d 100644 --- a/eventlet/green/ssl.py +++ b/eventlet/green/ssl.py @@ -93,7 +93,7 @@ def __new__(cls, sock=None, keyfile=None, certfile=None, ret.cert_reqs = cert_reqs ret.ssl_version = ssl_version ret.ca_certs = ca_certs - ret.__class__ = GreenSSLSocket + ret.__class__ = cls return ret @staticmethod From 5214b6e09416cacba032e1b43c6ad494e05bbf7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Fri, 22 Dec 2023 14:26:54 +0100 Subject: [PATCH 24/35] update changelog for version 0.34.2 (#860) --- NEWS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/NEWS b/NEWS index 7e649a65e6..63ea65637d 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,10 @@ +0.34.2 +====== + +* Allowing inheritance of GreenSSLSocket without overriding the __new_ method https://github.com/eventlet/eventlet/pull/796 +* [bug] Fix broken API related to `__version__` removal https://github.com/eventlet/eventlet/pull/859 +* [doc] Fix pypi broken link https://github.com/eventlet/eventlet/pull/857 + 0.34.1 ====== From c85bd19a7fce690bf7f9de1daacb4abac227e6cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Fri, 22 Dec 2023 15:26:25 +0100 Subject: [PATCH 25/35] Generate module version file at build (#859) * Generate module version file at build Gunicore rely on `eventlet.__version__` [1], however this data have been removed during our modernization of the continuous deployment mechanisms [2]. People reported problem with gunicore after 0.34.1 [3][4], so, it could be worth to reintroduce similar version info, to avoid side effects. This patch propose to use a `hatch-vcs` hook [5] to generate dynamically, at build, the missing data. Other solutions exists but each of them have their own problems [6]. 
Indeed, considering "footgun" described in [6] I choose the hatch-vcs approach, because, retrieving a wrong version number during development when the lib is installed in editable mode, is not, I think, something horrible. I prefer this side effect rather than relying on another additional underlying library just to print a version number when eventlet is installed in editable mode. A new additional requirement which would be installed anytime at runtime and production. Moreover, sometimes you want to import a package from a development repository tarball you just downloaded (where there's no metadata or Git tags present). So, Using `setuptools_scm` or `importlib.metadata.version` won't works in that context. Fix https://github.com/benoitc/gunicorn/issues/3120 _version.py is generated, and therefore shouldn't be checked in. Adding it to `.gitignore`. [1] https://github.com/benoitc/gunicorn/issues/3120 [2] https://github.com/eventlet/eventlet/pull/845 [3] https://github.com/eventlet/eventlet/pull/845#discussion_r1433985555 [4] https://github.com/eventlet/eventlet/issues/842#issuecomment-1864896273 [5] https://github.com/ofek/hatch-vcs#build-hook [6] https://github.com/maresb/hatch-vcs-footgun-example --------- Co-authored-by: Itamar Turner-Trauring --- .gitignore | 3 +++ eventlet/__init__.py | 12 ++++++++++++ pyproject.toml | 3 +++ 3 files changed, 18 insertions(+) diff --git a/.gitignore b/.gitignore index f4202283b8..0aacc62741 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ dist/ doc/changelog.rst venv* website-build/ + +# auto-generated by hatch +eventlet/_version.py \ No newline at end of file diff --git a/eventlet/__init__.py b/eventlet/__init__.py index 16d32cbd8d..959af3fd64 100644 --- a/eventlet/__init__.py +++ b/eventlet/__init__.py @@ -17,7 +17,19 @@ from eventlet import semaphore from eventlet import support from eventlet import timeout +# NOTE(hberaud): Versions are now managed by hatch and control version. 
+# hatch has a build hook which generates the version file, however, +# if the project is installed in editable mode then the _version.py file +# will not be updated unless the package is reinstalled (or locally rebuilt). +# For further details, please read: +# https://github.com/ofek/hatch-vcs#build-hook +# https://github.com/maresb/hatch-vcs-footgun-example +try: + from eventlet._version import __version__ +except ImportError: + __version__ = "0.0.0" import greenlet + # Force monotonic library search as early as possible. # Helpful when CPython < 3.5 on Linux blocked in `os.waitpid(-1)` before first use of hub. # Example: gunicorn diff --git a/pyproject.toml b/pyproject.toml index b08d9ec61d..7be6115e79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,3 +59,6 @@ exclude = ["tests*", "benchmarks", "examples"] [tool.hatch] version.source = "vcs" + +[tool.hatch.build.hooks.vcs] +version-file = "eventlet/_version.py" From 82363597ad96cae2df8dc7aa449467a7026b43c6 Mon Sep 17 00:00:00 2001 From: Eli Schwartz Date: Fri, 22 Dec 2023 12:47:53 -0500 Subject: [PATCH 26/35] Drop old code based on python < 3.7, part 1: eventlet directory (#853) * inline version comparison checks Assigning global constants for sys.version_info comparisons makes it hard to run automatic migrations. * automatically upgrade code to drop python2-specific Klass(object) part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP004 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific super(), part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP008 --fix --unsafe-fixes ``` fixing a couple of resulting pycodestyle indent issues, and committing the results. * automatically upgrade code to drop python2-specific coding cookie part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP009 --fix ``` and committing the results. 
* automatically upgrade code to drop python2-specific future import part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP010 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific exceptions, part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP024 --fix ``` and committing the results. * automatically upgrade code to drop python2 format placeholders, part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP030 --fix --unsafe-fixes ``` and committing the results. * automatically upgrade code to drop python2-specific imports, part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP035 --fix ``` and realizing that it was already try/excepted. The automatic rewriter doesn't notice the difference, but we do. Manually fix this up, and commit it. * automatically upgrade remaining code to drop python2-specific logic, part 1 Ported by running the following command: ``` find eventlet -name '*.py' -exec pyupgrade --py3-only --keep-percent-format {} + ``` fixing a couple of resulting pycodestyle indent/spacing issues, and committing the results. Touch only the primary project code for now. Clean up the tests etc. later. This covers a few different types of fixes: - collapse various six imports to their canonical python3 location - elide or collapse various six.PY2 / six.PY3 conditional code to unconditionally, exclusively, run the latter - catch some OSError conversions that ruff did not catch, because of `__import__()` - rewrite set/dict to literals - directly use dict methods (.keys, .items, ...) instead of py2-wrapper .iter - mark strings as raw if they have invalid escape sequences * manually clean up a lot of remaining "import six" code, part 1 Touch only the eventlet/ directory. Manual cleanups to follow. 
Variously: - simply drop an unused import - convert six.StringIO to io.StringIO - convert six.b() to encoding string -> bytes - collapsing six.moves or six.PY2 logic too complex for the rewriter * Drop remaining python < 3.7 compatible legacy code, part 1 Automatically migrated by running: ``` ruff check eventlet/ --select UP036 --fix --unsafe-fixes ``` and committing the results. Touch only eventlet/ for now. This implements one fixer: - code guarded by sys.version_info conditional on a target python of py37 --------- Co-authored-by: Itamar Turner-Trauring --- eventlet/__init__.py | 5 -- eventlet/backdoor.py | 8 +- eventlet/convenience.py | 2 +- eventlet/corolocal.py | 2 +- eventlet/coros.py | 4 +- eventlet/dagpool.py | 29 ++++--- eventlet/db_pool.py | 18 ++--- eventlet/debug.py | 7 +- eventlet/event.py | 4 +- eventlet/green/BaseHTTPServer.py | 3 +- eventlet/green/MySQLdb.py | 2 +- eventlet/green/Queue.py | 6 +- eventlet/green/SocketServer.py | 3 +- eventlet/green/builtin.py | 13 --- eventlet/green/http/__init__.py | 2 - eventlet/green/http/client.py | 13 ++- eventlet/green/http/cookiejar.py | 6 +- eventlet/green/http/cookies.py | 2 +- eventlet/green/httplib.py | 12 +-- eventlet/green/os.py | 10 +-- eventlet/green/profile.py | 12 +-- eventlet/green/select.py | 7 +- eventlet/green/ssl.py | 127 ++++++++++++------------------ eventlet/green/subprocess.py | 20 ++--- eventlet/green/thread.py | 16 ++-- eventlet/green/threading.py | 10 +-- eventlet/green/urllib/__init__.py | 35 -------- eventlet/green/zmq.py | 9 +-- eventlet/greenio/__init__.py | 7 +- eventlet/greenio/base.py | 72 +++++++---------- eventlet/greenio/py2.py | 27 +++---- eventlet/greenio/py3.py | 19 +++-- eventlet/greenpool.py | 13 ++- eventlet/greenthread.py | 3 +- eventlet/hubs/__init__.py | 3 +- eventlet/hubs/epolls.py | 4 +- eventlet/hubs/hub.py | 12 ++- eventlet/hubs/kqueue.py | 17 ++-- eventlet/hubs/poll.py | 16 ++-- eventlet/hubs/selects.py | 8 +- eventlet/hubs/timer.py | 6 +- eventlet/patcher.py | 
64 +++++++-------- eventlet/pools.py | 6 +- eventlet/queue.py | 12 ++- eventlet/semaphore.py | 12 +-- eventlet/support/greendns.py | 31 ++++---- eventlet/support/stacklesss.py | 4 +- eventlet/tpool.py | 17 ++-- eventlet/websocket.py | 63 ++++++++------- eventlet/wsgi.py | 82 +++++++++---------- eventlet/zipkin/api.py | 2 +- eventlet/zipkin/client.py | 2 +- eventlet/zipkin/http.py | 36 +-------- eventlet/zipkin/wsgi.py | 2 +- 54 files changed, 361 insertions(+), 566 deletions(-) diff --git a/eventlet/__init__.py b/eventlet/__init__.py index 959af3fd64..318ee0d69c 100644 --- a/eventlet/__init__.py +++ b/eventlet/__init__.py @@ -2,11 +2,6 @@ import sys import warnings -if sys.version_info < (3, 5): - warnings.warn( - "Support for your Python version is deprecated and will be removed in the future", - DeprecationWarning, - ) from eventlet import convenience from eventlet import event diff --git a/eventlet/backdoor.py b/eventlet/backdoor.py index f49a969a6f..c72ac1e874 100644 --- a/eventlet/backdoor.py +++ b/eventlet/backdoor.py @@ -1,5 +1,3 @@ -from __future__ import print_function - from code import InteractiveConsole import errno import socket @@ -21,7 +19,7 @@ sys.ps2 = '... 
' -class FileProxy(object): +class FileProxy: def __init__(self, f): self.f = f @@ -35,7 +33,7 @@ def write(self, data, *a, **kw): try: self.f.write(data, *a, **kw) self.f.flush() - except socket.error as e: + except OSError as e: if get_errno(e) != errno.EPIPE: raise @@ -108,7 +106,7 @@ def backdoor_server(sock, locals=None): try: socketpair = sock.accept() backdoor(socketpair, locals) - except socket.error as e: + except OSError as e: # Broken pipe means it was shutdown if get_errno(e) != errno.EPIPE: raise diff --git a/eventlet/convenience.py b/eventlet/convenience.py index a02284dee6..4d286aa864 100644 --- a/eventlet/convenience.py +++ b/eventlet/convenience.py @@ -63,7 +63,7 @@ def listen(addr, family=socket.AF_INET, backlog=50, reuse_addr=True, reuse_port= try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # OSError is enough on Python 3+ - except (OSError, socket.error) as ex: + except OSError as ex: if support.get_errno(ex) in (22, 92): # A famous platform defines unsupported socket option. # https://github.com/eventlet/eventlet/issues/380 diff --git a/eventlet/corolocal.py b/eventlet/corolocal.py index 1a1a8dfa23..73b10b6327 100644 --- a/eventlet/corolocal.py +++ b/eventlet/corolocal.py @@ -12,7 +12,7 @@ def get_ident(): # the entire purpose of this class is to store off the constructor # arguments in a local variable without calling __init__ directly -class _localbase(object): +class _localbase: __slots__ = '_local__args', '_local__greens' def __new__(cls, *args, **kw): diff --git a/eventlet/coros.py b/eventlet/coros.py index 431e6f0576..fbd7e995fa 100644 --- a/eventlet/coros.py +++ b/eventlet/coros.py @@ -1,9 +1,7 @@ -from __future__ import print_function - from eventlet import event as _event -class metaphore(object): +class metaphore: """This is sort of an inverse semaphore: a counter that starts at 0 and waits only if nonzero. It's used to implement a "wait for all" scenario. 
diff --git a/eventlet/dagpool.py b/eventlet/dagpool.py index 68bb49a68e..47d13a8230 100644 --- a/eventlet/dagpool.py +++ b/eventlet/dagpool.py @@ -5,7 +5,6 @@ from eventlet.event import Event from eventlet import greenthread -import six import collections @@ -39,9 +38,9 @@ class PropagateError(Exception): """ def __init__(self, key, exc): # initialize base class with a reasonable string message - msg = "PropagateError({0}): {1}: {2}" \ + msg = "PropagateError({}): {}: {}" \ .format(key, exc.__class__.__name__, exc) - super(PropagateError, self).__init__(msg) + super().__init__(msg) self.msg = msg # Unless we set args, this is unpickleable: # https://bugs.python.org/issue1692335 @@ -53,7 +52,7 @@ def __str__(self): return self.msg -class DAGPool(object): +class DAGPool: """ A DAGPool is a pool that constrains greenthreads, not by max concurrency, but by data dependencies. @@ -123,7 +122,7 @@ def __init__(self, preload={}): try: # If a dict is passed, copy it. Don't risk a subsequent # modification to passed dict affecting our internal state. - iteritems = six.iteritems(preload) + iteritems = preload.items() except AttributeError: # Not a dict, just an iterable of (key, value) pairs iteritems = preload @@ -258,7 +257,7 @@ def _get_keyset_for_wait_each(self, keys): return set(keys) else: # keys arg omitted -- use all the keys we know about - return set(six.iterkeys(self.coros)) | set(six.iterkeys(self.values)) + return set(self.coros.keys()) | set(self.values.keys()) def _wait_each(self, pending): """ @@ -394,7 +393,7 @@ def spawn_many(self, depends, function, *args, **kwds): """ # Iterate over 'depends' items, relying on self.spawn() not to # context-switch so no one can modify 'depends' along the way. 
- for key, deps in six.iteritems(depends): + for key, deps in depends.items(): self.spawn(key, deps, function, *args, **kwds) def kill(self, key): @@ -502,7 +501,7 @@ def keys(self): """ # Explicitly return a copy rather than an iterator: don't assume our # caller will finish iterating before new values are posted. - return tuple(six.iterkeys(self.values)) + return tuple(self.values.keys()) def items(self): """ @@ -511,7 +510,7 @@ def items(self): # Don't assume our caller will finish iterating before new values are # posted. return tuple((key, self._value_or_raise(value)) - for key, value in six.iteritems(self.values)) + for key, value in self.values.items()) def running(self): """ @@ -529,7 +528,7 @@ def running_keys(self): """ # return snapshot; don't assume caller will finish iterating before we # next modify self.coros - return tuple(six.iterkeys(self.coros)) + return tuple(self.coros.keys()) def waiting(self): """ @@ -567,7 +566,7 @@ def waiting_for(self, key=_MISSING): # some or all of those keys, because those greenthreads have not yet # regained control since values were posted. So make a point of # excluding values that are now available. - available = set(six.iterkeys(self.values)) + available = set(self.values.keys()) if key is not _MISSING: # waiting_for(key) is semantically different than waiting_for(). @@ -596,7 +595,7 @@ def waiting_for(self, key=_MISSING): # are now available. Filter out any pair in which 'pending' is empty, # that is, that greenthread will be unblocked next time it resumes. # Make a dict from those pairs. 
- return dict((key, pending) - for key, pending in ((key, (coro.pending - available)) - for key, coro in six.iteritems(self.coros)) - if pending) + return {key: pending + for key, pending in ((key, (coro.pending - available)) + for key, coro in self.coros.items()) + if pending} diff --git a/eventlet/db_pool.py b/eventlet/db_pool.py index ef74d99ce4..27c0333cf6 100644 --- a/eventlet/db_pool.py +++ b/eventlet/db_pool.py @@ -1,5 +1,3 @@ -from __future__ import print_function - from collections import deque from contextlib import contextmanager import sys @@ -59,9 +57,7 @@ def __init__(self, db_module, self.connect_timeout = connect_timeout self._expiration_timer = None self.cleanup = cleanup - super(BaseConnectionPool, self).__init__(min_size=min_size, - max_size=max_size, - order_as_stack=True) + super().__init__(min_size=min_size, max_size=max_size, order_as_stack=True) def _schedule_expiration(self): """Sets up a timer that will call _expire_old_connections when the @@ -173,7 +169,7 @@ def _safe_close(self, conn, quiet=False): print("Connection.close raised: %s" % (sys.exc_info()[1])) def get(self): - conn = super(BaseConnectionPool, self).get() + conn = super().get() # None is a flag value that means that put got called with # something it couldn't use @@ -229,12 +225,12 @@ def put(self, conn, cleanup=_MISSING): raise if conn is not None: - super(BaseConnectionPool, self).put((now, created_at, conn)) + super().put((now, created_at, conn)) else: # wake up any waiters with a flag value that indicates # they need to manufacture a connection if self.waiting() > 0: - super(BaseConnectionPool, self).put(None) + super().put(None) else: # no waiters -- just change the size self.current_size -= 1 @@ -308,7 +304,7 @@ def connect(cls, db_module, connect_timeout, *args, **kw): ConnectionPool = TpooledConnectionPool -class GenericConnectionWrapper(object): +class GenericConnectionWrapper: def __init__(self, baseconn): self._base = baseconn @@ -386,7 +382,7 @@ class 
PooledConnectionWrapper(GenericConnectionWrapper): """ def __init__(self, baseconn, pool): - super(PooledConnectionWrapper, self).__init__(baseconn) + super().__init__(baseconn) self._pool = pool def __nonzero__(self): @@ -416,7 +412,7 @@ def __del__(self): # self.close() -class DatabaseConnector(object): +class DatabaseConnector: """ This is an object which will maintain a collection of database connection pools on a per-host basis. diff --git a/eventlet/debug.py b/eventlet/debug.py index 6481aeac93..3cf27a7e64 100644 --- a/eventlet/debug.py +++ b/eventlet/debug.py @@ -1,6 +1,5 @@ """The debug module contains utilities and functions for better debugging Eventlet-powered applications.""" -from __future__ import print_function import os import sys @@ -13,10 +12,10 @@ 'hub_prevent_multiple_readers', 'hub_timer_stacks', 'hub_blocking_detection'] -_token_splitter = re.compile('\W+') +_token_splitter = re.compile(r'\W+') -class Spew(object): +class Spew: def __init__(self, trace_names=None, show_values=True): self.trace_names = trace_names @@ -37,7 +36,7 @@ def __call__(self, frame, event, arg): try: src = inspect.getsourcelines(frame) line = src[lineno] - except IOError: + except OSError: line = 'Unknown code named [%s]. VM instruction #%d' % ( frame.f_code.co_name, frame.f_lasti) if self.trace_names is None or name in self.trace_names: diff --git a/eventlet/event.py b/eventlet/event.py index 9d99a544dc..b334a53251 100644 --- a/eventlet/event.py +++ b/eventlet/event.py @@ -1,5 +1,3 @@ -from __future__ import print_function - from eventlet import hubs from eventlet.support import greenlets as greenlet @@ -14,7 +12,7 @@ def __repr__(self): NOT_USED = NOT_USED() -class Event(object): +class Event: """An abstraction where an arbitrary number of coroutines can wait for one event from another. 
diff --git a/eventlet/green/BaseHTTPServer.py b/eventlet/green/BaseHTTPServer.py index 493efd2daf..9a737309bf 100644 --- a/eventlet/green/BaseHTTPServer.py +++ b/eventlet/green/BaseHTTPServer.py @@ -1,10 +1,9 @@ from eventlet import patcher from eventlet.green import socket from eventlet.green import SocketServer -import six patcher.inject( - 'BaseHTTPServer' if six.PY2 else 'http.server', + 'http.server', globals(), ('socket', socket), ('SocketServer', SocketServer), diff --git a/eventlet/green/MySQLdb.py b/eventlet/green/MySQLdb.py index 2395e51965..16a7ec5886 100644 --- a/eventlet/green/MySQLdb.py +++ b/eventlet/green/MySQLdb.py @@ -22,7 +22,7 @@ def Connection(*args, **kw): # replicate the MySQLdb.connections module but with a tpooled Connection factory -class MySQLdbConnectionsModule(object): +class MySQLdbConnectionsModule: pass diff --git a/eventlet/green/Queue.py b/eventlet/green/Queue.py index 59a9a30265..947d43a7f8 100644 --- a/eventlet/green/Queue.py +++ b/eventlet/green/Queue.py @@ -12,21 +12,21 @@ class Queue(queue.Queue): def __init__(self, maxsize=0): if maxsize == 0: maxsize = None - super(Queue, self).__init__(maxsize) + super().__init__(maxsize) class PriorityQueue(queue.PriorityQueue): def __init__(self, maxsize=0): if maxsize == 0: maxsize = None - super(PriorityQueue, self).__init__(maxsize) + super().__init__(maxsize) class LifoQueue(queue.LifoQueue): def __init__(self, maxsize=0): if maxsize == 0: maxsize = None - super(LifoQueue, self).__init__(maxsize) + super().__init__(maxsize) Empty = queue.Empty diff --git a/eventlet/green/SocketServer.py b/eventlet/green/SocketServer.py index 17a4d43575..b94ead39ae 100644 --- a/eventlet/green/SocketServer.py +++ b/eventlet/green/SocketServer.py @@ -3,10 +3,9 @@ from eventlet.green import socket from eventlet.green import select from eventlet.green import threading -import six patcher.inject( - 'SocketServer' if six.PY2 else 'socketserver', + 'socketserver', globals(), ('socket', socket), ('select', 
select), diff --git a/eventlet/green/builtin.py b/eventlet/green/builtin.py index 8d0603a334..ce982902fe 100644 --- a/eventlet/green/builtin.py +++ b/eventlet/green/builtin.py @@ -13,27 +13,14 @@ from eventlet.hubs import hub from eventlet.patcher import slurp_properties import sys -import six __all__ = dir(builtins_orig) __patched__ = ['open'] -if six.PY2: - __patched__ += ['file'] - slurp_properties(builtins_orig, globals(), ignore=__patched__, srckeys=dir(builtins_orig)) hubs.get_hub() -if six.PY2: - __original_file = file - - class file(__original_file): - def __init__(self, *args, **kwargs): - super(file, self).__init__(*args, **kwargs) - hubs.notify_opened(self.fileno()) - - __original_open = open __opening = False diff --git a/eventlet/green/http/__init__.py b/eventlet/green/http/__init__.py index 2e861755fb..14e74fd9e7 100644 --- a/eventlet/green/http/__init__.py +++ b/eventlet/green/http/__init__.py @@ -52,8 +52,6 @@ # 8. By copying, installing or otherwise using Python, Licensee # agrees to be bound by the terms and conditions of this License # Agreement. 
-import six -assert six.PY3, 'This is a Python 3 module' from enum import IntEnum diff --git a/eventlet/green/http/client.py b/eventlet/green/http/client.py index 3399333e5f..2051ca9505 100644 --- a/eventlet/green/http/client.py +++ b/eventlet/green/http/client.py @@ -126,10 +126,7 @@ import email.message import io import re -try: - from collections.abc import Iterable -except ImportError: - from collections import Iterable +from collections.abc import Iterable from urllib.parse import urlsplit from eventlet.green import http, os, socket @@ -1126,7 +1123,7 @@ def _send_output(self, message_body=None, encode_chunked=False): if encode_chunked and self._http_vsn == 11: # chunked encoding - chunk = '{0:X}\r\n'.format(len(chunk)).encode('ascii') + chunk + b'\r\n' + chunk = '{:X}\r\n'.format(len(chunk)).encode('ascii') + chunk + b'\r\n' self.send(chunk) if encode_chunked and self._http_vsn == 11: @@ -1294,7 +1291,7 @@ def endheaders(self, message_body=None, **kwds): encode_chunked = kwds.pop('encode_chunked', False) if kwds: # mimic interpreter error for unrecognized keyword - raise TypeError("endheaders() got an unexpected keyword argument '{0}'" + raise TypeError("endheaders() got an unexpected keyword argument '{}'" .format(kwds.popitem()[0])) if self.__state == _CS_REQ_STARTED: @@ -1308,7 +1305,7 @@ def request(self, method, url, body=None, headers={}, **kwds): encode_chunked = kwds.pop('encode_chunked', False) if kwds: # mimic interpreter error for unrecognized keyword - raise TypeError("request() got an unexpected keyword argument '{0}'" + raise TypeError("request() got an unexpected keyword argument '{}'" .format(kwds.popitem()[0])) self._send_request(method, url, body, headers, encode_chunked) @@ -1477,7 +1474,7 @@ def __init__(self, host, port=None, key_file=None, cert_file=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, context=None, check_hostname=None): - super(HTTPSConnection, self).__init__(host, port, timeout, + super().__init__(host, 
port, timeout, source_address) self.key_file = key_file self.cert_file = cert_file diff --git a/eventlet/green/http/cookiejar.py b/eventlet/green/http/cookiejar.py index 9c884e9b60..bbc5a76b02 100644 --- a/eventlet/green/http/cookiejar.py +++ b/eventlet/green/http/cookiejar.py @@ -254,7 +254,7 @@ def _str2time(day, mon, yr, hr, min, sec, tz): STRICT_DATE_RE = re.compile( r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) " - "(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII) + r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII) WEEKDAY_RE = re.compile( r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII) LOOSE_HTTP_DATE_RE = re.compile( @@ -331,7 +331,7 @@ def http2time(text): return _str2time(day, mon, yr, hr, min, sec, tz) ISO_DATE_RE = re.compile( - """^ + r"""^ (\d{4}) # year [-\/]? (\d\d?) # numerical month @@ -465,7 +465,7 @@ def split_header_words(header_values): pairs = [] else: # skip junk - non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) + non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text) assert nr_junk_chars > 0, ( "split_header_words bug: '%s', '%s', %s" % (orig_text, text, pairs)) diff --git a/eventlet/green/http/cookies.py b/eventlet/green/http/cookies.py index 0a0a150182..d93cd7134b 100644 --- a/eventlet/green/http/cookies.py +++ b/eventlet/green/http/cookies.py @@ -511,7 +511,7 @@ def OutputString(self, attrs=None): # _LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=" -_LegalValueChars = _LegalKeyChars + '\[\]' +_LegalValueChars = _LegalKeyChars + r'\[\]' _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern \s* # Optional whitespace at start of cookie diff --git a/eventlet/green/httplib.py b/eventlet/green/httplib.py index 6330efa40b..f67dbfe974 100644 --- a/eventlet/green/httplib.py +++ b/eventlet/green/httplib.py @@ -1,6 +1,5 @@ from eventlet import patcher from eventlet.green import socket -import six to_patch = [('socket', socket)] @@ -10,13 +9,10 @@ except ImportError: pass -if six.PY2: - 
patcher.inject('httplib', globals(), *to_patch) -if six.PY3: - from eventlet.green.http import client - for name in dir(client): - if name not in patcher.__exclude: - globals()[name] = getattr(client, name) +from eventlet.green.http import client +for name in dir(client): + if name not in patcher.__exclude: + globals()[name] = getattr(client, name) if __name__ == '__main__': test() diff --git a/eventlet/green/os.py b/eventlet/green/os.py index 2d1fe6a1ae..1195ed8a80 100644 --- a/eventlet/green/os.py +++ b/eventlet/green/os.py @@ -26,7 +26,7 @@ def fdopen(fd, *args, **kw): raise TypeError('fd should be int, not %r' % fd) try: return greenio.GreenPipe(fd, *args, **kw) - except IOError as e: + except OSError as e: raise OSError(*e.args) @@ -40,10 +40,10 @@ def read(fd, n): while True: try: return __original_read__(fd, n) - except (OSError, IOError) as e: + except OSError as e: if get_errno(e) != errno.EAGAIN: raise - except socket.error as e: + except OSError as e: if get_errno(e) == errno.EPIPE: return '' raise @@ -64,10 +64,10 @@ def write(fd, st): while True: try: return __original_write__(fd, st) - except (OSError, IOError) as e: + except OSError as e: if get_errno(e) != errno.EAGAIN: raise - except socket.error as e: + except OSError as e: if get_errno(e) != errno.EPIPE: raise hubs.trampoline(fd, write=True) diff --git a/eventlet/green/profile.py b/eventlet/green/profile.py index 9dfd750cd1..a03b507a6f 100644 --- a/eventlet/green/profile.py +++ b/eventlet/green/profile.py @@ -40,9 +40,9 @@ from eventlet import greenthread from eventlet import patcher -import six +import _thread -thread = patcher.original(six.moves._thread.__name__) # non-monkeypatched module needed +thread = patcher.original(_thread.__name__) # non-monkeypatched module needed # This class provides the start() and stop() functions @@ -144,10 +144,10 @@ def TallyTimings(self): # we must keep the timings dicts separate for each tasklet, since it contains # the 'ns' item, recursion count of each 
function in that tasklet. This is # used in the Unwind dude. - for tasklet, (cur, timings) in six.iteritems(oldtimings): + for tasklet, (cur, timings) in oldtimings.items(): self.Unwind(cur, timings) - for k, v in six.iteritems(timings): + for k, v in timings.items(): if k not in self.timings: self.timings[k] = v else: @@ -157,7 +157,7 @@ def TallyTimings(self): cc += v[0] tt += v[2] ct += v[3] - for k1, v1 in six.iteritems(v[4]): + for k1, v1 in v[4].items(): callers[k1] = callers.get(k1, 0) + v1 self.timings[k] = cc, ns, tt, ct, callers @@ -213,7 +213,7 @@ def ContextWrapper(self, arg, t): 'c_return': Profile.trace_dispatch_c_return_extend_back, }) # Add automatic tasklet detection to the callbacks. -Profile.dispatch = dict((k, ContextWrap(v)) for k, v in six.viewitems(Profile.dispatch)) +Profile.dispatch = {k: ContextWrap(v) for k, v in Profile.dispatch.items()} # run statements shamelessly stolen from profile.py diff --git a/eventlet/green/select.py b/eventlet/green/select.py index e293f3666b..e35ec45309 100644 --- a/eventlet/green/select.py +++ b/eventlet/green/select.py @@ -1,6 +1,5 @@ import eventlet from eventlet.hubs import get_hub -import six __select = eventlet.patcher.original('select') error = __select.error @@ -17,12 +16,12 @@ def get_fileno(obj): try: f = obj.fileno except AttributeError: - if not isinstance(obj, six.integer_types): + if not isinstance(obj, int): raise TypeError("Expected int or long, got %s" % type(obj)) return obj else: rv = f() - if not isinstance(rv, six.integer_types): + if not isinstance(rv, int): raise TypeError("Expected int or long, got %s" % type(rv)) return rv @@ -71,7 +70,7 @@ def on_timeout(): if timeout is not None: timers.append(hub.schedule_call_global(timeout, on_timeout)) try: - for k, v in six.iteritems(ds): + for k, v in ds.items(): if v.get('read'): listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None)) if v.get('write'): diff --git a/eventlet/green/ssl.py b/eventlet/green/ssl.py index 
ebfeb2cb3d..ebd57f615e 100644 --- a/eventlet/green/ssl.py +++ b/eventlet/green/ssl.py @@ -10,7 +10,6 @@ ) from eventlet.hubs import trampoline, IOClosed from eventlet.support import get_errno, PY33 -import six from contextlib import contextmanager orig_socket = __import__('socket') @@ -23,7 +22,6 @@ _original_sslsocket = __ssl.SSLSocket _original_sslcontext = __ssl.SSLContext -_is_under_py_3_7 = sys.version_info < (3, 7) _is_py_3_7 = sys.version_info[:2] == (3, 7) _original_wrap_socket = __ssl.SSLContext.wrap_socket @@ -59,42 +57,39 @@ def __new__(cls, sock=None, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, ssl_version=PROTOCOL_SSLv23, ca_certs=None, do_handshake_on_connect=True, *args, **kw): - if _is_under_py_3_7: - return super(GreenSSLSocket, cls).__new__(cls) - else: - if not isinstance(sock, GreenSocket): - sock = GreenSocket(sock) - with _original_ssl_context(): - context = kw.get('_context') - if context: - ret = _original_sslsocket._create( - sock=sock.fd, - server_side=server_side, - do_handshake_on_connect=False, - suppress_ragged_eofs=kw.get('suppress_ragged_eofs', True), - server_hostname=kw.get('server_hostname'), - context=context, - session=kw.get('session'), - ) - else: - ret = cls._wrap_socket( - sock=sock.fd, - keyfile=keyfile, - certfile=certfile, - server_side=server_side, - cert_reqs=cert_reqs, - ssl_version=ssl_version, - ca_certs=ca_certs, - do_handshake_on_connect=False, - ciphers=kw.get('ciphers'), - ) - ret.keyfile = keyfile - ret.certfile = certfile - ret.cert_reqs = cert_reqs - ret.ssl_version = ssl_version - ret.ca_certs = ca_certs - ret.__class__ = cls - return ret + if not isinstance(sock, GreenSocket): + sock = GreenSocket(sock) + with _original_ssl_context(): + context = kw.get('_context') + if context: + ret = _original_sslsocket._create( + sock=sock.fd, + server_side=server_side, + do_handshake_on_connect=False, + suppress_ragged_eofs=kw.get('suppress_ragged_eofs', True), + 
server_hostname=kw.get('server_hostname'), + context=context, + session=kw.get('session'), + ) + else: + ret = cls._wrap_socket( + sock=sock.fd, + keyfile=keyfile, + certfile=certfile, + server_side=server_side, + cert_reqs=cert_reqs, + ssl_version=ssl_version, + ca_certs=ca_certs, + do_handshake_on_connect=False, + ciphers=kw.get('ciphers'), + ) + ret.keyfile = keyfile + ret.certfile = certfile + ret.cert_reqs = cert_reqs + ret.ssl_version = ssl_version + ret.ca_certs = ca_certs + ret.__class__ = GreenSSLSocket + return ret @staticmethod def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs, @@ -126,17 +121,6 @@ def __init__(self, sock, keyfile=None, certfile=None, sock = GreenSocket(sock) self.act_non_blocking = sock.act_non_blocking - if six.PY2: - # On Python 2 SSLSocket constructor queries the timeout, it'd break without - # this assignment - self._timeout = sock.gettimeout() - - if _is_under_py_3_7: - # nonblocking socket handshaking on connect got disabled so let's pretend it's disabled - # even when it's on - super(GreenSSLSocket, self).__init__( - sock.fd, keyfile, certfile, server_side, cert_reqs, ssl_version, - ca_certs, do_handshake_on_connect and six.PY2, *args, **kw) # the superclass initializer trashes the methods so we remove # the local-object versions of them and let the actual class # methods shine through @@ -147,18 +131,17 @@ def __init__(self, sock, keyfile=None, certfile=None, except AttributeError: pass - if six.PY3: - # Python 3 SSLSocket construction process overwrites the timeout so restore it - self._timeout = sock.gettimeout() + # Python 3 SSLSocket construction process overwrites the timeout so restore it + self._timeout = sock.gettimeout() - # it also sets timeout to None internally apparently (tested with 3.4.2) - _original_sslsocket.settimeout(self, 0.0) - assert _original_sslsocket.gettimeout(self) == 0.0 + # it also sets timeout to None internally apparently (tested with 3.4.2) + _original_sslsocket.settimeout(self, 
0.0) + assert _original_sslsocket.gettimeout(self) == 0.0 - # see note above about handshaking - self.do_handshake_on_connect = do_handshake_on_connect - if do_handshake_on_connect and self._connected: - self.do_handshake() + # see note above about handshaking + self.do_handshake_on_connect = do_handshake_on_connect + if do_handshake_on_connect and self._connected: + self.do_handshake() def settimeout(self, timeout): self._timeout = timeout @@ -204,14 +187,14 @@ def write(self, data): """Write DATA to the underlying SSL channel. Returns number of bytes of DATA actually transmitted.""" return self._call_trampolining( - super(GreenSSLSocket, self).write, data) + super().write, data) def read(self, len=1024, buffer=None): """Read up to LEN bytes and return them. Return zero-length string on EOF.""" try: return self._call_trampolining( - super(GreenSSLSocket, self).read, len, buffer) + super().read, len, buffer) except IOClosed: if buffer is None: return b'' @@ -221,7 +204,7 @@ def read(self, len=1024, buffer=None): def send(self, data, flags=0): if self._sslobj: return self._call_trampolining( - super(GreenSSLSocket, self).send, data, flags) + super().send, data, flags) else: trampoline(self, write=True, timeout_exc=timeout_exc('timed out')) return socket.send(self, data, flags) @@ -324,22 +307,22 @@ def recvfrom(self, addr, buflen=1024, flags=0): if not self.act_non_blocking: trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) - return super(GreenSSLSocket, self).recvfrom(addr, buflen, flags) + return super().recvfrom(addr, buflen, flags) def recvfrom_into(self, buffer, nbytes=None, flags=0): if not self.act_non_blocking: trampoline(self, read=True, timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out')) - return super(GreenSSLSocket, self).recvfrom_into(buffer, nbytes, flags) + return super().recvfrom_into(buffer, nbytes, flags) def unwrap(self): return GreenSocket(self._call_trampolining( - super(GreenSSLSocket, 
self).unwrap)) + super().unwrap)) def do_handshake(self): """Perform a TLS/SSL handshake.""" return self._call_trampolining( - super(GreenSSLSocket, self).do_handshake) + super().do_handshake) def _socket_connect(self, addr): real_connect = socket.connect @@ -390,11 +373,8 @@ def connect(self, addr): sslwrap = _ssl.sslwrap except AttributeError: # sslwrap was removed in 3.x and later in 2.7.9 - if six.PY2: - sslobj = self._context._wrap_socket(self._sock, server_side, ssl_sock=self) - else: - context = self.context if PY33 else self._context - sslobj = context._wrap_socket(self, server_side, server_hostname=self.server_hostname) + context = self.context if PY33 else self._context + sslobj = context._wrap_socket(self, server_side, server_hostname=self.server_hostname) else: sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile, self.cert_reqs, self.ssl_version, @@ -406,10 +386,7 @@ def connect(self, addr): except NameError: self._sslobj = sslobj else: - if _is_under_py_3_7: - self._sslobj = SSLObject(sslobj, owner=self) - else: - self._sslobj = sslobj + self._sslobj = sslobj if self.do_handshake_on_connect: self.do_handshake() diff --git a/eventlet/green/subprocess.py b/eventlet/green/subprocess.py index f021ceda26..4509208387 100644 --- a/eventlet/green/subprocess.py +++ b/eventlet/green/subprocess.py @@ -6,15 +6,13 @@ from eventlet import greenio from eventlet import patcher from eventlet.green import select, threading, time -import six __patched__ = ['call', 'check_call', 'Popen'] to_patch = [('select', select), ('threading', threading), ('time', time)] -if sys.version_info > (3, 4): - from eventlet.green import selectors - to_patch.append(('selectors', selectors)) +from eventlet.green import selectors +to_patch.append(('selectors', selectors)) patcher.inject('subprocess', globals(), *to_patch) subprocess_orig = patcher.original("subprocess") @@ -103,17 +101,14 @@ def wait(self, timeout=None, check_interval=0.01): # just want a version that uses 
eventlet.green.select.select() # instead of select.select(). _communicate = FunctionType( - six.get_function_code(six.get_unbound_function( - subprocess_orig.Popen._communicate)), + subprocess_orig.Popen._communicate.__code__, globals()) try: _communicate_with_select = FunctionType( - six.get_function_code(six.get_unbound_function( - subprocess_orig.Popen._communicate_with_select)), + subprocess_orig.Popen._communicate_with_select.__code__, globals()) _communicate_with_poll = FunctionType( - six.get_function_code(six.get_unbound_function( - subprocess_orig.Popen._communicate_with_poll)), + subprocess_orig.Popen._communicate_with_poll.__code__, globals()) except AttributeError: pass @@ -122,9 +117,8 @@ def wait(self, timeout=None, check_interval=0.01): # Borrow subprocess.call() and check_call(), but patch them so they reference # OUR Popen class rather than subprocess.Popen. def patched_function(function): - new_function = FunctionType(six.get_function_code(function), globals()) - if six.PY3: - new_function.__kwdefaults__ = function.__kwdefaults__ + new_function = FunctionType(function.__code__, globals()) + new_function.__kwdefaults__ = function.__kwdefaults__ new_function.__defaults__ = function.__defaults__ return new_function diff --git a/eventlet/green/thread.py b/eventlet/green/thread.py index 7b821317b8..053a1c3c65 100644 --- a/eventlet/green/thread.py +++ b/eventlet/green/thread.py @@ -1,6 +1,5 @@ """Implements the standard thread module, using greenthreads.""" -from six.moves import _thread as __thread -import six +import _thread as __thread from eventlet.support import greenlets as greenlet from eventlet import greenthread from eventlet.lock import Lock @@ -18,13 +17,14 @@ if hasattr(__thread, "_is_main_interpreter"): _is_main_interpreter = __thread._is_main_interpreter -if six.PY3: - def _set_sentinel(): - # TODO this is a dummy code, reimplementing this may be needed: - # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203 - 
return allocate_lock() - TIMEOUT_MAX = __thread.TIMEOUT_MAX +def _set_sentinel(): + # TODO this is a dummy code, reimplementing this may be needed: + # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203 + return allocate_lock() + + +TIMEOUT_MAX = __thread.TIMEOUT_MAX def _count(): diff --git a/eventlet/green/threading.py b/eventlet/green/threading.py index 93be29e49b..7ea20cdad9 100644 --- a/eventlet/green/threading.py +++ b/eventlet/green/threading.py @@ -3,16 +3,12 @@ from eventlet.green import thread from eventlet.green import time from eventlet.support import greenlets as greenlet -import six __patched__ = ['_start_new_thread', '_allocate_lock', '_sleep', 'local', 'stack_size', 'Lock', 'currentThread', 'current_thread', '_after_fork', '_shutdown'] -if six.PY2: - __patched__ += ['_get_ident'] -else: - __patched__ += ['get_ident', '_set_sentinel'] +__patched__ += ['get_ident', '_set_sentinel'] __orig_threading = eventlet.patcher.original('threading') __threadlocal = __orig_threading.local() @@ -22,14 +18,14 @@ eventlet.patcher.inject( 'threading', globals(), - ('thread' if six.PY2 else '_thread', thread), + ('_thread', thread), ('time', time)) _count = 1 -class _GreenThread(object): +class _GreenThread: """Wrapper for GreenThread objects to provide Thread-like attributes and methods""" diff --git a/eventlet/green/urllib/__init__.py b/eventlet/green/urllib/__init__.py index bcfc349fb3..44335dd1fc 100644 --- a/eventlet/green/urllib/__init__.py +++ b/eventlet/green/urllib/__init__.py @@ -3,38 +3,3 @@ from eventlet.green import time from eventlet.green import httplib from eventlet.green import ftplib -import six - -if six.PY2: - to_patch = [('socket', socket), ('httplib', httplib), - ('time', time), ('ftplib', ftplib)] - try: - from eventlet.green import ssl - to_patch.append(('ssl', ssl)) - except ImportError: - pass - - patcher.inject('urllib', globals(), *to_patch) - try: - URLopener - except NameError: - patcher.inject('urllib.request', 
globals(), *to_patch) - - - # patch a bunch of things that have imports inside the - # function body; this is lame and hacky but I don't feel - # too bad because urllib is a hacky pile of junk that no - # one should be using anyhow - URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib)) - if hasattr(URLopener, 'open_https'): - URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib)) - - URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib)) - ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib)) - ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib)) - - del patcher - - # Run test program when run as a script - if __name__ == '__main__': - main() diff --git a/eventlet/green/zmq.py b/eventlet/green/zmq.py index 373aca17c9..bc152bd7c5 100644 --- a/eventlet/green/zmq.py +++ b/eventlet/green/zmq.py @@ -1,4 +1,3 @@ -# coding: utf-8 """The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq ` to be non blocking. """ @@ -26,7 +25,7 @@ class LockReleaseError(Exception): pass -class _QueueLock(object): +class _QueueLock: """A Lock that can be acquired by at most one thread. Any other thread calling acquire will be blocked in a queue. When release is called, the threads are awoken in the order they blocked, @@ -76,7 +75,7 @@ def release(self): self._hub.schedule_call_global(0, self._waiters[0].switch) -class _BlockedThread(object): +class _BlockedThread: """Is either empty, or represents a single blocked thread that blocked itself by calling the block() method. The thread can be awoken by calling wake(). 
Wake() can be called multiple times and @@ -221,7 +220,7 @@ class Socket(_Socket): """ def __init__(self, context, socket_type): - super(Socket, self).__init__(context, socket_type) + super().__init__(context, socket_type) self.__dict__['_eventlet_send_event'] = _BlockedThread() self.__dict__['_eventlet_recv_event'] = _BlockedThread() @@ -250,7 +249,7 @@ def event(fd): @_wraps(_Socket.close) def close(self, linger=None): - super(Socket, self).close(linger) + super().close(linger) if self._eventlet_listener is not None: eventlet.hubs.get_hub().remove(self._eventlet_listener) self.__dict__['_eventlet_listener'] = None diff --git a/eventlet/greenio/__init__.py b/eventlet/greenio/__init__.py index f6c5247f48..513c4a5657 100644 --- a/eventlet/greenio/__init__.py +++ b/eventlet/greenio/__init__.py @@ -1,8 +1,3 @@ -import six - from eventlet.greenio.base import * # noqa -if six.PY2: - from eventlet.greenio.py2 import * # noqa -else: - from eventlet.greenio.py3 import * # noqa +from eventlet.greenio.py3 import * # noqa diff --git a/eventlet/greenio/base.py b/eventlet/greenio/base.py index 51a7ae13ef..d216a71b23 100644 --- a/eventlet/greenio/base.py +++ b/eventlet/greenio/base.py @@ -8,7 +8,6 @@ import eventlet from eventlet.hubs import trampoline, notify_opened, IOClosed from eventlet.support import get_errno -import six __all__ = [ 'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking', @@ -18,14 +17,11 @@ ] BUFFER_SIZE = 4096 -CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK)) -CONNECT_SUCCESS = set((0, errno.EISCONN)) +CONNECT_ERR = {errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK} +CONNECT_SUCCESS = {0, errno.EISCONN} if sys.platform[:3] == "win": CONNECT_ERR.add(errno.WSAEINVAL) # Bug 67 -if six.PY2: - _python2_fileobject = socket._fileobject - _original_socket = eventlet.patcher.original('socket').socket @@ -44,14 +40,14 @@ def socket_connect(descriptor, address): if err in CONNECT_ERR: return None if err not in CONNECT_SUCCESS: - 
raise socket.error(err, errno.errorcode[err]) + raise OSError(err, errno.errorcode[err]) return descriptor def socket_checkerr(descriptor): err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) if err not in CONNECT_SUCCESS: - raise socket.error(err, errno.errorcode[err]) + raise OSError(err, errno.errorcode[err]) def socket_accept(descriptor): @@ -62,7 +58,7 @@ def socket_accept(descriptor): """ try: return descriptor.accept() - except socket.error as e: + except OSError as e: if get_errno(e) == errno.EWOULDBLOCK: return None raise @@ -70,13 +66,13 @@ def socket_accept(descriptor): if sys.platform[:3] == "win": # winsock sometimes throws ENOTCONN - SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK,)) - SOCKET_CLOSED = set((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN)) + SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK} + SOCKET_CLOSED = {errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN} else: # oddly, on linux/darwin, an unconnected socket is expected to block, # so we treat ENOTCONN the same as EWOULDBLOCK - SOCKET_BLOCKING = set((errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN)) - SOCKET_CLOSED = set((errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE)) + SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN} + SOCKET_CLOSED = {errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE} def set_nonblocking(fd): @@ -120,7 +116,7 @@ def set_nonblocking(fd): _GLOBAL_DEFAULT_TIMEOUT = object() -class GreenSocket(object): +class GreenSocket: """ Green version of socket.socket class, that is intended to be 100% API-compatible. @@ -135,7 +131,7 @@ class GreenSocket(object): def __init__(self, family=socket.AF_INET, *args, **kwargs): should_set_nonblocking = kwargs.pop('set_nonblocking', True) - if isinstance(family, six.integer_types): + if isinstance(family, int): fd = _original_socket(family, *args, **kwargs) # Notify the hub that this is a newly-opened socket. 
notify_opened(fd.fileno()) @@ -176,14 +172,13 @@ def __init__(self, family=socket.AF_INET, *args, **kwargs): def _sock(self): return self - if six.PY3: - def _get_io_refs(self): - return self.fd._io_refs + def _get_io_refs(self): + return self.fd._io_refs - def _set_io_refs(self, value): - self.fd._io_refs = value + def _set_io_refs(self, value): + self.fd._io_refs = value - _io_refs = property(_get_io_refs, _set_io_refs) + _io_refs = property(_get_io_refs, _set_io_refs) # Forward unknown attributes to fd, cache the value for future use. # I do not see any simple attribute which could be changed @@ -252,7 +247,7 @@ def connect(self, address): try: self._trampoline(fd, write=True) except IOClosed: - raise socket.error(errno.EBADFD) + raise OSError(errno.EBADFD) socket_checkerr(fd) else: end = time.time() + self.gettimeout() @@ -266,7 +261,7 @@ def connect(self, address): self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc) except IOClosed: # ... we need some workable errno here. 
- raise socket.error(errno.EBADFD) + raise OSError(errno.EBADFD) socket_checkerr(fd) def connect_ex(self, address): @@ -278,7 +273,7 @@ def connect_ex(self, address): try: self._trampoline(fd, write=True) socket_checkerr(fd) - except socket.error as ex: + except OSError as ex: return get_errno(ex) except IOClosed: return errno.EBADFD @@ -295,7 +290,7 @@ def connect_ex(self, address): self._trampoline(fd, write=True, timeout=end - time.time(), timeout_exc=timeout_exc) socket_checkerr(fd) - except socket.error as ex: + except OSError as ex: return get_errno(ex) except IOClosed: return errno.EBADFD @@ -307,21 +302,8 @@ def dup(self, *args, **kw): newsock.settimeout(self.gettimeout()) return newsock - if six.PY3: - def makefile(self, *args, **kwargs): - return _original_socket.makefile(self, *args, **kwargs) - else: - def makefile(self, *args, **kwargs): - dupped = self.dup() - res = _python2_fileobject(dupped, *args, **kwargs) - if hasattr(dupped, "_drop"): - dupped._drop() - # Making the close function of dupped None so that when garbage collector - # kicks in and tries to call del, which will ultimately call close, _drop - # doesn't get called on dupped twice as it has been already explicitly called in - # previous line - dupped.close = None - return res + def makefile(self, *args, **kwargs): + return _original_socket.makefile(self, *args, **kwargs) def makeGreenFile(self, *args, **kw): warnings.warn("makeGreenFile has been deprecated, please use " @@ -352,7 +334,7 @@ def _recv_loop(self, recv_meth, empty_val, *args): if not args[0]: self._read_trampoline() return recv_meth(*args) - except socket.error as e: + except OSError as e: if get_errno(e) in SOCKET_BLOCKING: pass elif get_errno(e) in SOCKET_CLOSED: @@ -386,7 +368,7 @@ def _send_loop(self, send_method, data, *args): while True: try: return send_method(data, *args) - except socket.error as e: + except OSError as e: eno = get_errno(e) if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING: raise @@ -395,7 
+377,7 @@ def _send_loop(self, send_method, data, *args): self._trampoline(self.fd, write=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc) except IOClosed: - raise socket.error(errno.ECONNRESET, 'Connection closed by another thread') + raise OSError(errno.ECONNRESET, 'Connection closed by another thread') def send(self, data, flags=0): return self._send_loop(self.fd.send, data, flags) @@ -473,7 +455,7 @@ def _operation_on_closed_file(*args, **kwargs): from OpenSSL import SSL except ImportError: # pyOpenSSL not installed, define exceptions anyway for convenience - class SSL(object): + class SSL: class WantWriteError(Exception): pass @@ -503,7 +485,7 @@ def shutdown_safe(sock): except TypeError: # SSL.Connection return sock.shutdown() - except socket.error as e: + except OSError as e: # we don't care if the socket is already closed; # this will often be the case in an http server context if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK): diff --git a/eventlet/greenio/py2.py b/eventlet/greenio/py2.py index 74d2ff56b7..a6fdca7741 100644 --- a/eventlet/greenio/py2.py +++ b/eventlet/greenio/py2.py @@ -10,7 +10,6 @@ ) from eventlet.hubs import trampoline, notify_close, notify_opened, IOClosed from eventlet.support import get_errno -import six __all__ = ['_fileobject', 'GreenPipe'] @@ -22,10 +21,10 @@ class GreenPipe(_fileobject): __doc__ = greenpipe_doc def __init__(self, f, mode='r', bufsize=-1): - if not isinstance(f, six.string_types + (int, file)): + if not isinstance(f, (str,) + (int, file)): raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f) - if isinstance(f, six.string_types): + if isinstance(f, str): f = open(f, mode, 0) if isinstance(f, int): @@ -39,7 +38,7 @@ def __init__(self, f, mode='r', bufsize=-1): self._name = f.name f.close() - super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode, bufsize) + super().__init__(_SocketDuckForFd(fileno), mode, bufsize) set_nonblocking(self) self.softspace = 0 @@ 
-56,7 +55,7 @@ def __repr__(self): (id(self) < 0) and (sys.maxint + id(self)) or id(self)) def close(self): - super(GreenPipe, self).close() + super().close() for method in [ 'fileno', 'flush', 'isatty', 'next', 'read', 'readinto', 'readline', 'readlines', 'seek', 'tell', 'truncate', @@ -82,7 +81,7 @@ def tell(self): try: return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len() except OSError as e: - raise IOError(*e.args) + raise OSError(*e.args) def seek(self, offset, whence=0): self.flush() @@ -93,7 +92,7 @@ def seek(self, offset, whence=0): try: rv = os.lseek(self.fileno(), offset, whence) except OSError as e: - raise IOError(*e.args) + raise OSError(*e.args) else: self._clear_readahead_buf() return rv @@ -106,7 +105,7 @@ def truncate(self, size=-1): try: rv = os.ftruncate(self.fileno(), size) except OSError as e: - raise IOError(*e.args) + raise OSError(*e.args) else: self.seek(size) # move position&clear buffer return rv @@ -115,10 +114,10 @@ def isatty(self): try: return os.isatty(self.fileno()) except OSError as e: - raise IOError(*e.args) + raise OSError(*e.args) -class _SocketDuckForFd(object): +class _SocketDuckForFd: """Class implementing all socket method used by _fileobject in cooperative manner using low level os I/O calls. 
""" @@ -162,7 +161,7 @@ def recv(self, buflen): return data except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: - raise IOError(*e.args) + raise OSError(*e.args) self._trampoline(self, read=True) def recv_into(self, buf, nbytes=0, flags=0): @@ -178,7 +177,7 @@ def send(self, data): return os.write(self._fileno, data) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: - raise IOError(*e.args) + raise OSError(*e.args) else: trampoline(self, write=True) @@ -190,7 +189,7 @@ def sendall(self, data): total_sent = os_write(fileno, data) except OSError as e: if get_errno(e) != errno.EAGAIN: - raise IOError(*e.args) + raise OSError(*e.args) total_sent = 0 while total_sent < len_data: self._trampoline(self, write=True) @@ -198,7 +197,7 @@ def sendall(self, data): total_sent += os_write(fileno, data[total_sent:]) except OSError as e: if get_errno(e) != errno. EAGAIN: - raise IOError(*e.args) + raise OSError(*e.args) def __del__(self): self._close() diff --git a/eventlet/greenio/py3.py b/eventlet/greenio/py3.py index 5762d6dcfc..5b84c8b101 100644 --- a/eventlet/greenio/py3.py +++ b/eventlet/greenio/py3.py @@ -20,7 +20,6 @@ ) from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline from eventlet.support import get_errno -import six __all__ = ['_fileobject', 'GreenPipe'] @@ -37,7 +36,7 @@ def __init__(self, name, mode='r', closefd=True, opener=None): fileno = name self._name = "" % fileno else: - assert isinstance(name, six.string_types) + assert isinstance(name, str) with open(name, mode) as fd: self._name = fd.name fileno = _original_os.dup(fd.fileno()) @@ -57,7 +56,7 @@ def seekable(self): if self._seekable is None: try: _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR) - except IOError as e: + except OSError as e: if get_errno(e) == errno.ESPIPE: self._seekable = False else: @@ -85,7 +84,7 @@ def read(self, size=-1): return _original_os.read(self._fileno, size) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: - raise 
IOError(*e.args) + raise OSError(*e.args) self._trampoline(self, read=True) def readall(self): @@ -98,7 +97,7 @@ def readall(self): buf.append(chunk) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: - raise IOError(*e.args) + raise OSError(*e.args) self._trampoline(self, read=True) def readinto(self, b): @@ -112,7 +111,7 @@ def isatty(self): try: return _original_os.isatty(self.fileno()) except OSError as e: - raise IOError(*e.args) + raise OSError(*e.args) def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None): if self._closed: @@ -141,7 +140,7 @@ def write(self, data): written = _original_os.write(self._fileno, view[offset:]) except OSError as e: if get_errno(e) not in SOCKET_BLOCKING: - raise IOError(*e.args) + raise OSError(*e.args) trampoline(self, write=True) else: offset += written @@ -164,7 +163,7 @@ def truncate(self, size=-1): try: rv = _original_os.ftruncate(self._fileno, size) except OSError as e: - raise IOError(*e.args) + raise OSError(*e.args) else: self.seek(size) # move position&clear buffer return rv @@ -173,7 +172,7 @@ def seek(self, offset, whence=_original_os.SEEK_SET): try: return _original_os.lseek(self._fileno, offset, whence) except OSError as e: - raise IOError(*e.args) + raise OSError(*e.args) def __enter__(self): return self @@ -196,7 +195,7 @@ def __exit__(self, *args): _pyio_open = getattr(_original_pyio.open, '__wrapped__', _original_pyio.open) _open = FunctionType( - six.get_function_code(_pyio_open), + _pyio_open.__code__, _open_environment, ) diff --git a/eventlet/greenpool.py b/eventlet/greenpool.py index 952659c3c3..c77df896e5 100644 --- a/eventlet/greenpool.py +++ b/eventlet/greenpool.py @@ -3,14 +3,13 @@ import eventlet from eventlet import queue from eventlet.support import greenlets as greenlet -import six __all__ = ['GreenPool', 'GreenPile'] DEBUG = True -class GreenPool(object): +class GreenPool: """The GreenPool class is a pool of green threads. 
""" @@ -18,10 +17,10 @@ def __init__(self, size=1000): try: size = int(size) except ValueError as e: - msg = 'GreenPool() expect size :: int, actual: {0} {1}'.format(type(size), str(e)) + msg = 'GreenPool() expect size :: int, actual: {} {}'.format(type(size), str(e)) raise TypeError(msg) if size < 0: - msg = 'GreenPool() expect size >= 0, actual: {0}'.format(repr(size)) + msg = 'GreenPool() expect size >= 0, actual: {}'.format(repr(size)) raise ValueError(msg) self.size = size self.coroutines_running = set() @@ -181,10 +180,10 @@ def worker(line): for result in pool.imap(worker, open("filename", 'r')): print(result) """ - return self.starmap(function, six.moves.zip(*iterables)) + return self.starmap(function, zip(*iterables)) -class GreenPile(object): +class GreenPile: """GreenPile is an abstraction representing a bunch of I/O-related tasks. Construct a GreenPile with an existing GreenPool object. The GreenPile will @@ -242,7 +241,7 @@ def _next(self): # instead relying on the spawning process to send one in when it's done class GreenMap(GreenPile): def __init__(self, size_or_pool): - super(GreenMap, self).__init__(size_or_pool) + super().__init__(size_or_pool) self.waiters = queue.LightQueue(maxsize=self.pool.size) def done_spawning(self): diff --git a/eventlet/greenthread.py b/eventlet/greenthread.py index 28f0a57c5b..7982ffab96 100644 --- a/eventlet/greenthread.py +++ b/eventlet/greenthread.py @@ -7,7 +7,6 @@ from eventlet import timeout from eventlet.hubs import timer from eventlet.support import greenlets as greenlet -import six import warnings __all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n', @@ -284,7 +283,7 @@ def kill(g, *throw_args): # method never got called def just_raise(*a, **kw): if throw_args: - six.reraise(throw_args[0], throw_args[1], throw_args[2]) + raise throw_args[1].with_traceback(throw_args[2]) else: raise greenlet.GreenletExit() g.run = just_raise diff --git a/eventlet/hubs/__init__.py b/eventlet/hubs/__init__.py index 
9aa2f5265e..f0fcc36876 100644 --- a/eventlet/hubs/__init__.py +++ b/eventlet/hubs/__init__.py @@ -5,7 +5,6 @@ from eventlet import patcher from eventlet.support import greenlets as greenlet -import six __all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"] @@ -71,7 +70,7 @@ def use_hub(mod=None): del _threadlocal.hub classname = '' - if isinstance(mod, six.string_types): + if isinstance(mod, str): assert mod.strip(), "Need to specify a hub" if '.' in mod or ':' in mod: modulename, _, classname = mod.strip().partition(':') diff --git a/eventlet/hubs/epolls.py b/eventlet/hubs/epolls.py index 07fec14dd4..770c18d1f0 100644 --- a/eventlet/hubs/epolls.py +++ b/eventlet/hubs/epolls.py @@ -12,7 +12,7 @@ def is_available(): # are identical in value to the poll constants class Hub(poll.Hub): def __init__(self, clock=None): - super(Hub, self).__init__(clock=clock) + super().__init__(clock=clock) self.poll = select.epoll() def add(self, evtype, fileno, cb, tb, mac): @@ -22,7 +22,7 @@ def add(self, evtype, fileno, cb, tb, mac): listener = hub.BaseHub.add(self, evtype, fileno, cb, tb, mac) try: self.register(fileno, new=not oldlisteners) - except IOError as ex: # ignore EEXIST, #80 + except OSError as ex: # ignore EEXIST, #80 if support.get_errno(ex) != errno.EEXIST: raise return listener diff --git a/eventlet/hubs/hub.py b/eventlet/hubs/hub.py index c27b81f709..abeee6c9f4 100644 --- a/eventlet/hubs/hub.py +++ b/eventlet/hubs/hub.py @@ -27,8 +27,6 @@ def alarm_signal(seconds): except ImportError: from time import monotonic -import six - g_prevent_multiple_readers = True READ = "read" @@ -42,7 +40,7 @@ def closed_callback(fileno): pass -class FdListener(object): +class FdListener: def __init__(self, evtype, fileno, cb, tb, mark_as_closed): """ The following are required: @@ -90,7 +88,7 @@ class DebugListener(FdListener): def __init__(self, evtype, fileno, cb, tb, mark_as_closed): self.where_called = traceback.format_stack() self.greenlet = greenlet.getcurrent() - 
super(DebugListener, self).__init__(evtype, fileno, cb, tb, mark_as_closed) + super().__init__(evtype, fileno, cb, tb, mark_as_closed) def __repr__(self): return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % ( @@ -109,7 +107,7 @@ def alarm_handler(signum, frame): raise RuntimeError("Blocking detector ALARMED at" + str(inspect.getframeinfo(frame))) -class BaseHub(object): +class BaseHub: """ Base hub class for easing the implementation of subclasses that are specific to a particular underlying event architecture. """ @@ -195,7 +193,7 @@ def _obsolete(self, fileno): their greenlets queued up to send. """ found = False - for evtype, bucket in six.iteritems(self.secondaries): + for evtype, bucket in self.secondaries.items(): if fileno in bucket: for listener in bucket[fileno]: found = True @@ -205,7 +203,7 @@ def _obsolete(self, fileno): # For the primary listeners, we actually need to call remove, # which may modify the underlying OS polling objects. - for evtype, bucket in six.iteritems(self.listeners): + for evtype, bucket in self.listeners.items(): if fileno in bucket: listener = bucket[fileno] found = True diff --git a/eventlet/hubs/kqueue.py b/eventlet/hubs/kqueue.py index 8438805c5a..95025764e6 100644 --- a/eventlet/hubs/kqueue.py +++ b/eventlet/hubs/kqueue.py @@ -2,7 +2,6 @@ import sys from eventlet import patcher, support from eventlet.hubs import hub -import six select = patcher.original('select') time = patcher.original('time') @@ -19,7 +18,7 @@ def __init__(self, clock=None): hub.READ: select.KQ_FILTER_READ, hub.WRITE: select.KQ_FILTER_WRITE, } - super(Hub, self).__init__(clock) + super().__init__(clock) self._events = {} self._init_kqueue() @@ -30,14 +29,14 @@ def _init_kqueue(self): def _reinit_kqueue(self): self.kqueue.close() self._init_kqueue() - events = [e for i in six.itervalues(self._events) - for e in six.itervalues(i)] + events = [e for i in self._events.values() + for e in i.values()] self.kqueue.control(events, 0, 0) def 
_control(self, events, max_events, timeout): try: return self.kqueue.control(events, max_events, timeout) - except (OSError, IOError): + except OSError: # have we forked? if os.getpid() != self._pid: self._reinit_kqueue() @@ -45,7 +44,7 @@ def _control(self, events, max_events, timeout): raise def add(self, evtype, fileno, cb, tb, mac): - listener = super(Hub, self).add(evtype, fileno, cb, tb, mac) + listener = super().add(evtype, fileno, cb, tb, mac) events = self._events.setdefault(fileno, {}) if evtype not in events: try: @@ -53,7 +52,7 @@ def add(self, evtype, fileno, cb, tb, mac): self._control([event], 0, 0) events[evtype] = event except ValueError: - super(Hub, self).remove(listener) + super().remove(listener) raise return listener @@ -65,7 +64,7 @@ def _delete_events(self, events): self._control(del_events, 0, 0) def remove(self, listener): - super(Hub, self).remove(listener) + super().remove(listener) evtype = listener.evtype fileno = listener.fileno if not self.listeners[evtype].get(fileno): @@ -78,7 +77,7 @@ def remove(self, listener): pass def remove_descriptor(self, fileno): - super(Hub, self).remove_descriptor(fileno) + super().remove_descriptor(fileno) try: events = self._events.pop(fileno).values() self._delete_events(events) diff --git a/eventlet/hubs/poll.py b/eventlet/hubs/poll.py index d3f9c6a3a6..0984214245 100644 --- a/eventlet/hubs/poll.py +++ b/eventlet/hubs/poll.py @@ -13,19 +13,19 @@ def is_available(): class Hub(hub.BaseHub): def __init__(self, clock=None): - super(Hub, self).__init__(clock) + super().__init__(clock) self.EXC_MASK = select.POLLERR | select.POLLHUP self.READ_MASK = select.POLLIN | select.POLLPRI self.WRITE_MASK = select.POLLOUT self.poll = select.poll() def add(self, evtype, fileno, cb, tb, mac): - listener = super(Hub, self).add(evtype, fileno, cb, tb, mac) + listener = super().add(evtype, fileno, cb, tb, mac) self.register(fileno, new=True) return listener def remove(self, listener): - super(Hub, self).remove(listener) + 
super().remove(listener) self.register(listener.fileno) def register(self, fileno, new=False): @@ -41,12 +41,12 @@ def register(self, fileno, new=False): else: try: self.poll.modify(fileno, mask) - except (IOError, OSError): + except OSError: self.poll.register(fileno, mask) else: try: self.poll.unregister(fileno) - except (KeyError, IOError, OSError): + except (KeyError, OSError): # raised if we try to remove a fileno that was # already removed/invalid pass @@ -56,10 +56,10 @@ def register(self, fileno, new=False): raise def remove_descriptor(self, fileno): - super(Hub, self).remove_descriptor(fileno) + super().remove_descriptor(fileno) try: self.poll.unregister(fileno) - except (KeyError, ValueError, IOError, OSError): + except (KeyError, ValueError, OSError): # raised if we try to remove a fileno that was # already removed/invalid pass @@ -78,7 +78,7 @@ def wait(self, seconds=None): return try: presult = self.do_poll(seconds) - except (IOError, select.error) as e: + except OSError as e: if support.get_errno(e) == errno.EINTR: return raise diff --git a/eventlet/hubs/selects.py b/eventlet/hubs/selects.py index 0386a1ed2f..b6cf1298ea 100644 --- a/eventlet/hubs/selects.py +++ b/eventlet/hubs/selects.py @@ -6,9 +6,9 @@ time = patcher.original('time') try: - BAD_SOCK = set((errno.EBADF, errno.WSAENOTSOCK)) + BAD_SOCK = {errno.EBADF, errno.WSAENOTSOCK} except AttributeError: - BAD_SOCK = set((errno.EBADF,)) + BAD_SOCK = {errno.EBADF} def is_available(): @@ -24,7 +24,7 @@ def _remove_bad_fds(self): for fd in all_fds: try: select.select([fd], [], [], 0) - except select.error as e: + except OSError as e: if support.get_errno(e) in BAD_SOCK: self.remove_descriptor(fd) @@ -40,7 +40,7 @@ def wait(self, seconds=None): all_fds = reader_fds + writer_fds try: r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds) - except select.error as e: + except OSError as e: if support.get_errno(e) == errno.EINTR: return elif support.get_errno(e) in BAD_SOCK: diff --git 
a/eventlet/hubs/timer.py b/eventlet/hubs/timer.py index 1dfd561f74..2e3fd95ce5 100644 --- a/eventlet/hubs/timer.py +++ b/eventlet/hubs/timer.py @@ -2,14 +2,14 @@ import eventlet.hubs from eventlet.support import greenlets as greenlet -import six +import io """ If true, captures a stack trace for each timer when constructed. This is useful for debugging leaking timers, to find out where the timer was set up. """ _g_debug = False -class Timer(object): +class Timer: def __init__(self, seconds, cb, *args, **kw): """Create a timer. seconds: The minimum number of seconds to wait before calling @@ -24,7 +24,7 @@ def __init__(self, seconds, cb, *args, **kw): self.tpl = cb, args, kw self.called = False if _g_debug: - self.traceback = six.StringIO() + self.traceback = io.StringIO() traceback.print_stack(file=self.traceback) @property diff --git a/eventlet/patcher.py b/eventlet/patcher.py index 32506ea7f0..bdccb69062 100644 --- a/eventlet/patcher.py +++ b/eventlet/patcher.py @@ -11,15 +11,14 @@ register_at_fork = None import eventlet -import six __all__ = ['inject', 'import_patched', 'monkey_patch', 'is_monkey_patched'] -__exclude = set(('__builtins__', '__file__', '__name__')) +__exclude = {'__builtins__', '__file__', '__name__'} -class SysModulesSaver(object): +class SysModulesSaver: """Class that captures some subset of the current state of sys.modules. Pass in an iterator of module names to the constructor.""" @@ -39,7 +38,7 @@ def restore(self): sys.modules. 
""" try: - for modname, mod in six.iteritems(self._saved): + for modname, mod in self._saved.items(): if mod is not None: sys.modules[modname] = mod else: @@ -198,10 +197,7 @@ def original(modname): # some rudimentary dependency checking -- fortunately the modules # we're working on don't have many dependencies so we can just do # some special-casing here - if six.PY2: - deps = {'threading': 'thread', 'Queue': 'threading'} - if six.PY3: - deps = {'threading': '_thread', 'queue': 'threading'} + deps = {'threading': '_thread', 'queue': 'threading'} if modname in deps: dependency = deps[modname] saver.save(dependency) @@ -249,9 +245,9 @@ def monkey_patch(**on): # the hub calls into monkey-patched modules. eventlet.hubs.get_hub() - accepted_args = set(('os', 'select', 'socket', - 'thread', 'time', 'psycopg', 'MySQLdb', - 'builtins', 'subprocess')) + accepted_args = {'os', 'select', 'socket', + 'thread', 'time', 'psycopg', 'MySQLdb', + 'builtins', 'subprocess'} # To make sure only one of them is passed here assert not ('__builtin__' in on and 'builtins' in on) try: @@ -263,7 +259,7 @@ def monkey_patch(**on): default_on = on.pop("all", None) - for k in six.iterkeys(on): + for k in on.keys(): if k not in accepted_args: raise TypeError("monkey_patch() got an unexpected " "keyword argument %r" % k) @@ -348,24 +344,22 @@ def after_fork(): finally: imp.release_lock() - if sys.version_info >= (3, 3): - import importlib._bootstrap - thread = original('_thread') - # importlib must use real thread locks, not eventlet.Semaphore - importlib._bootstrap._thread = thread + import importlib._bootstrap + thread = original('_thread') + # importlib must use real thread locks, not eventlet.Semaphore + importlib._bootstrap._thread = thread - # Issue #185: Since Python 3.3, threading.RLock is implemented in C and - # so call a C function to get the thread identifier, instead of calling - # threading.get_ident(). 
Force the Python implementation of RLock which - # calls threading.get_ident() and so is compatible with eventlet. - import threading - threading.RLock = threading._PyRLock + # Issue #185: Since Python 3.3, threading.RLock is implemented in C and + # so call a C function to get the thread identifier, instead of calling + # threading.get_ident(). Force the Python implementation of RLock which + # calls threading.get_ident() and so is compatible with eventlet. + import threading + threading.RLock = threading._PyRLock # Issue #508: Since Python 3.7 queue.SimpleQueue is implemented in C, # causing a deadlock. Replace the C implementation with the Python one. - if sys.version_info >= (3, 7): - import queue - queue.SimpleQueue = queue._PySimpleQueue + import queue + queue.SimpleQueue = queue._PySimpleQueue def is_monkey_patched(module): @@ -495,9 +489,8 @@ def _green_select_modules(): from eventlet.green import select modules = [('select', select)] - if sys.version_info >= (3, 4): - from eventlet.green import selectors - modules.append(('selectors', selectors)) + from eventlet.green import selectors + modules.append(('selectors', selectors)) return modules @@ -520,10 +513,7 @@ def _green_thread_modules(): from eventlet.green import Queue from eventlet.green import thread from eventlet.green import threading - if six.PY2: - return [('Queue', Queue), ('thread', thread), ('threading', threading)] - if six.PY3: - return [('queue', Queue), ('_thread', thread), ('threading', threading)] + return [('queue', Queue), ('_thread', thread), ('threading', threading)] def _green_time_modules(): @@ -542,7 +532,7 @@ def _green_MySQLdb(): def _green_builtins(): try: from eventlet.green import builtin - return [('__builtin__' if six.PY2 else 'builtins', builtin)] + return [('builtins', builtin)] except ImportError: return [] @@ -557,11 +547,11 @@ def slurp_properties(source, destination, ignore=[], srckeys=None): """ if srckeys is None: srckeys = source.__all__ - destination.update(dict([ 
- (name, getattr(source, name)) + destination.update({ + name: getattr(source, name) for name in srckeys if not (name.startswith('__') or name in ignore) - ])) + }) if __name__ == "__main__": diff --git a/eventlet/pools.py b/eventlet/pools.py index ee9b77bbeb..a65f17453f 100644 --- a/eventlet/pools.py +++ b/eventlet/pools.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import collections from contextlib import contextmanager @@ -9,7 +7,7 @@ __all__ = ['Pool', 'TokenPool'] -class Pool(object): +class Pool: """ Pool class implements resource limitation and construction. @@ -172,7 +170,7 @@ def create(self): raise NotImplementedError("Implement in subclass") -class Token(object): +class Token: pass diff --git a/eventlet/queue.py b/eventlet/queue.py index b61c2f876d..2ee071c445 100644 --- a/eventlet/queue.py +++ b/eventlet/queue.py @@ -39,7 +39,6 @@ and :meth:`Queue.putting` report on the number of greenthreads blocking in :meth:`put ` or :meth:`get ` respectively. """ -from __future__ import print_function import sys import heapq @@ -49,19 +48,18 @@ from eventlet.event import Event from eventlet.greenthread import getcurrent from eventlet.hubs import get_hub -import six -from six.moves import queue as Stdlib_Queue +import queue as Stdlib_Queue from eventlet.timeout import Timeout __all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'LightQueue', 'Full', 'Empty'] _NONE = object() -Full = six.moves.queue.Full -Empty = six.moves.queue.Empty +Full = Stdlib_Queue.Full +Empty = Stdlib_Queue.Empty -class Waiter(object): +class Waiter: """A low level synchronization class. Wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them safe: @@ -143,7 +141,7 @@ def wait(self): self.greenlet = None -class LightQueue(object): +class LightQueue: """ This is a variant of Queue that behaves mostly like the standard :class:`Stdlib_Queue`. 
It differs by not supporting the diff --git a/eventlet/semaphore.py b/eventlet/semaphore.py index 18b5b05f4a..218d01a051 100644 --- a/eventlet/semaphore.py +++ b/eventlet/semaphore.py @@ -4,7 +4,7 @@ from eventlet import hubs -class Semaphore(object): +class Semaphore: """An unbounded semaphore. Optionally initialize with a resource *count*, then :meth:`acquire` and @@ -34,10 +34,10 @@ def __init__(self, value=1): try: value = int(value) except ValueError as e: - msg = 'Semaphore() expect value :: int, actual: {0} {1}'.format(type(value), str(e)) + msg = 'Semaphore() expect value :: int, actual: {} {}'.format(type(value), str(e)) raise TypeError(msg) if value < 0: - msg = 'Semaphore() expect value >= 0, actual: {0}'.format(repr(value)) + msg = 'Semaphore() expect value >= 0, actual: {}'.format(repr(value)) raise ValueError(msg) self.counter = value self._waiters = collections.deque() @@ -176,7 +176,7 @@ class BoundedSemaphore(Semaphore): """ def __init__(self, value=1): - super(BoundedSemaphore, self).__init__(value) + super().__init__(value) self.original_counter = value def release(self, blocking=True): @@ -190,10 +190,10 @@ def release(self, blocking=True): """ if self.counter >= self.original_counter: raise ValueError("Semaphore released too many times") - return super(BoundedSemaphore, self).release(blocking) + return super().release(blocking) -class CappedSemaphore(object): +class CappedSemaphore: """A blockingly bounded semaphore. 
diff --git a/eventlet/support/greendns.py b/eventlet/support/greendns.py index 5ffbb1fa02..0ac0a78e8b 100644 --- a/eventlet/support/greendns.py +++ b/eventlet/support/greendns.py @@ -43,7 +43,6 @@ from eventlet.green import time from eventlet.green import select from eventlet.green import ssl -import six def import_patched(module_name): @@ -103,7 +102,7 @@ def _raise_new_error(error_instance): def is_ipv4_addr(host): """Return True if host is a valid IPv4 address""" - if not isinstance(host, six.string_types): + if not isinstance(host, str): return False try: dns.ipv4.inet_aton(host) @@ -115,7 +114,7 @@ def is_ipv4_addr(host): def is_ipv6_addr(host): """Return True if host is a valid IPv6 address""" - if not isinstance(host, six.string_types): + if not isinstance(host, str): return False host = host.split('%', 1)[0] try: @@ -166,7 +165,7 @@ def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True): rrset.ttl if hasattr(rrset, 'ttl') else 0) -class HostsResolver(object): +class HostsResolver: """Class to parse the hosts file Attributes @@ -211,12 +210,12 @@ def _readlines(self): try: with open(self.fname, 'rb') as fp: fdata = fp.read() - except (IOError, OSError): + except OSError: return [] udata = fdata.decode(errors='ignore') - return six.moves.filter(None, self.LINES_RE.findall(udata)) + return filter(None, self.LINES_RE.findall(udata)) def _load(self): """Load hosts file @@ -267,10 +266,10 @@ def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, if self._last_load + self.interval < now: self._load() rdclass = dns.rdataclass.IN - if isinstance(qname, six.string_types): + if isinstance(qname, str): name = qname qname = dns.name.from_text(qname) - elif isinstance(qname, six.binary_type): + elif isinstance(qname, bytes): name = qname.decode("ascii") qname = dns.name.from_text(qname) else: @@ -310,14 +309,14 @@ def getaliases(self, hostname): else: cannon = hostname aliases.append(cannon) - for alias, cname in 
six.iteritems(self._aliases): + for alias, cname in self._aliases.items(): if cannon == cname: aliases.append(alias) aliases.remove(hostname) return aliases -class ResolverProxy(object): +class ResolverProxy: """Resolver class which can also use /etc/hosts Initialise with a HostsResolver instance in order for it to also @@ -372,7 +371,7 @@ def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN, if qname is None: qname = '0.0.0.0' - if isinstance(qname, six.string_types) or isinstance(qname, six.binary_type): + if isinstance(qname, str) or isinstance(qname, bytes): qname = dns.name.from_text(qname, None) def step(fun, *args, **kwargs): @@ -551,9 +550,9 @@ def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0): flag ensures getaddrinfo(3) does not use the network itself and allows us to respect all the other arguments like the native OS. """ - if isinstance(host, six.string_types): + if isinstance(host, str): host = host.encode('idna').decode('ascii') - elif isinstance(host, six.binary_type): + elif isinstance(host, bytes): host = host.decode("ascii") if host is not None and not is_ip_addr(host): qname, addrs = _getaddrinfo_lookup(host, family, flags) @@ -567,7 +566,7 @@ def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0): try: ai = socket.getaddrinfo(addr, port, family, socktype, proto, aiflags) - except socket.error as e: + except OSError as e: if flags & socket.AI_ADDRCONFIG: err = e continue @@ -631,7 +630,7 @@ def getnameinfo(sockaddr, flags): rrset = resolver.query( dns.reversename.from_address(host), dns.rdatatype.PTR) if len(rrset) > 1: - raise socket.error('sockaddr resolved to multiple addresses') + raise OSError('sockaddr resolved to multiple addresses') host = rrset[0].target.to_text(omit_final_dot=True) except dns.exception.Timeout: if flags & socket.NI_NAMEREQD: @@ -643,7 +642,7 @@ def getnameinfo(sockaddr, flags): try: rrset = resolver.query(host) if len(rrset) > 1: - raise socket.error('sockaddr resolved 
to multiple addresses') + raise OSError('sockaddr resolved to multiple addresses') if flags & socket.NI_NUMERICHOST: host = rrset[0].address except dns.exception.Timeout: diff --git a/eventlet/support/stacklesss.py b/eventlet/support/stacklesss.py index 4d19c5b6e5..9b3951e592 100644 --- a/eventlet/support/stacklesss.py +++ b/eventlet/support/stacklesss.py @@ -17,7 +17,7 @@ def getcurrent(): return tasklet_to_greenlet[stackless.getcurrent()] -class FirstSwitch(object): +class FirstSwitch: def __init__(self, gr): self.gr = gr @@ -33,7 +33,7 @@ def __call__(self, *args, **kw): t.run() -class greenlet(object): +class greenlet: def __init__(self, run=None, parent=None): self.dead = False if parent is None: diff --git a/eventlet/tpool.py b/eventlet/tpool.py index af5e8959d3..1a3f412401 100644 --- a/eventlet/tpool.py +++ b/eventlet/tpool.py @@ -24,7 +24,6 @@ import eventlet from eventlet import event, greenio, greenthread, patcher, timeout -import six __all__ = ['execute', 'Proxy', 'killall', 'set_num_threads'] @@ -36,10 +35,7 @@ socket = patcher.original('socket') threading = patcher.original('threading') -if six.PY2: - Queue_module = patcher.original('Queue') -if six.PY3: - Queue_module = patcher.original('queue') +Queue_module = patcher.original('queue') Empty = Queue_module.Empty Queue = Queue_module.Queue @@ -88,10 +84,7 @@ def tworker(): raise except EXC_CLASSES: rv = sys.exc_info() - if sys.version_info >= (3, 4): - traceback.clear_frames(rv[1].__traceback__) - if six.PY2: - sys.exc_clear() + traceback.clear_frames(rv[1].__traceback__) # test_leakage_from_tracebacks verifies that the use of # exc_info does not lead to memory leaks _rspq.put((e, rv)) @@ -129,7 +122,7 @@ def execute(meth, *args, **kwargs): if not QUIET: traceback.print_exception(c, e, tb) traceback.print_stack() - six.reraise(c, e, tb) + raise e.with_traceback(tb) return rv @@ -155,7 +148,7 @@ def proxy_call(autowrap, f, *args, **kwargs): return rv -class Proxy(object): +class Proxy: """ a simple 
proxy-wrapper of any object that comes with a methods-only interface, in order to forward every method @@ -289,7 +282,7 @@ def setup(): _rsock = greenio.GreenSocket(csock) _rsock.settimeout(None) - for i in six.moves.range(_nthreads): + for i in range(_nthreads): t = threading.Thread(target=tworker, name="tpool_thread_%s" % i) t.daemon = True diff --git a/eventlet/websocket.py b/eventlet/websocket.py index 01245b8093..87dd6f3cbc 100644 --- a/eventlet/websocket.py +++ b/eventlet/websocket.py @@ -21,7 +21,6 @@ from eventlet import wsgi from eventlet.green import socket from eventlet.support import get_errno -import six # Python 2's utf8 decoding is more lenient than we'd like # In order to pass autobahn's testsuite we need stricter validation @@ -37,7 +36,7 @@ else: break -ACCEPTABLE_CLIENT_ERRORS = set((errno.ECONNRESET, errno.EPIPE)) +ACCEPTABLE_CLIENT_ERRORS = {errno.ECONNRESET, errno.EPIPE} DEFAULT_MAX_FRAME_LENGTH = 8 << 20 __all__ = ["WebSocketWSGI", "WebSocket"] @@ -62,7 +61,7 @@ def __init__(self, status='400 Bad Request', body=None, headers=None): self.headers = headers -class WebSocketWSGI(object): +class WebSocketWSGI: """Wraps a websocket handler function in a WSGI application. 
Use it like this:: @@ -135,7 +134,7 @@ def __call__(self, environ, start_response): try: self.handler(ws) - except socket.error as e: + except OSError as e: if get_errno(e) not in ACCEPTABLE_CLIENT_ERRORS: raise # Make sure we send the closing frame @@ -188,18 +187,18 @@ def _handle_legacy_request(self, environ): b"HTTP/1.1 101 Web Socket Protocol Handshake\r\n" b"Upgrade: WebSocket\r\n" b"Connection: Upgrade\r\n" - b"WebSocket-Origin: " + six.b(environ.get('HTTP_ORIGIN')) + b"\r\n" - b"WebSocket-Location: " + six.b(location) + b"\r\n\r\n" + b"WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n" + b"WebSocket-Location: " + location.encode() + b"\r\n\r\n" ) elif self.protocol_version == 76: handshake_reply = ( b"HTTP/1.1 101 WebSocket Protocol Handshake\r\n" b"Upgrade: WebSocket\r\n" b"Connection: Upgrade\r\n" - b"Sec-WebSocket-Origin: " + six.b(environ.get('HTTP_ORIGIN')) + b"\r\n" + b"Sec-WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n" b"Sec-WebSocket-Protocol: " + - six.b(environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default')) + b"\r\n" - b"Sec-WebSocket-Location: " + six.b(location) + b"\r\n" + environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default').encode() + b"\r\n" + b"Sec-WebSocket-Location: " + location.encode() + b"\r\n" b"\r\n" + response ) else: # pragma NO COVER @@ -266,14 +265,14 @@ def _format_extension_header(self, parsed_extensions): return None parts = [] for name, config in parsed_extensions.items(): - ext_parts = [six.b(name)] + ext_parts = [name.encode()] for key, value in config.items(): if value is False: pass elif value is True: - ext_parts.append(six.b(key)) + ext_parts.append(key.encode()) else: - ext_parts.append(six.b("%s=%s" % (key, str(value)))) + ext_parts.append(("%s=%s" % (key, str(value))).encode()) parts.append(b"; ".join(ext_parts)) return b", ".join(parts) @@ -309,13 +308,13 @@ def _handle_hybi_request(self, environ): break key = environ['HTTP_SEC_WEBSOCKET_KEY'] - response = 
base64.b64encode(sha1(six.b(key) + PROTOCOL_GUID).digest()) + response = base64.b64encode(sha1(key.encode() + PROTOCOL_GUID).digest()) handshake_reply = [b"HTTP/1.1 101 Switching Protocols", b"Upgrade: websocket", b"Connection: Upgrade", b"Sec-WebSocket-Accept: " + response] if negotiated_protocol: - handshake_reply.append(b"Sec-WebSocket-Protocol: " + six.b(negotiated_protocol)) + handshake_reply.append(b"Sec-WebSocket-Protocol: " + negotiated_protocol.encode()) parsed_extensions = {} extensions = self._parse_extension_header(environ.get("HTTP_SEC_WEBSOCKET_EXTENSIONS")) @@ -349,7 +348,7 @@ def _extract_number(self, value): return int(out) // spaces -class WebSocket(object): +class WebSocket: """A websocket object that handles the details of serialization/deserialization to the socket. @@ -399,10 +398,10 @@ def _pack_message(self, message): As per the dataframing section (5.3) for the websocket spec """ - if isinstance(message, six.text_type): + if isinstance(message, str): message = message.encode('utf-8') - elif not isinstance(message, six.binary_type): - message = six.b(str(message)) + elif not isinstance(message, bytes): + message = str(message).encode() packed = b"\x00" + message + b"\xFF" return packed @@ -417,7 +416,7 @@ def _parse_messages(self): end_idx = 0 buf = self._buf while buf: - frame_type = six.indexbytes(buf, 0) + frame_type = buf[0] if frame_type == 0: # Normal message. end_idx = buf.find(b"\xFF") @@ -427,7 +426,7 @@ def _parse_messages(self): buf = buf[end_idx + 1:] elif frame_type == 255: # Closing handshake. 
- assert six.indexbytes(buf, 1) == 0, "Unexpected closing handshake: %r" % buf + assert buf[1] == 0, "Unexpected closing handshake: %r" % buf self.websocket_closed = True break else: @@ -475,7 +474,7 @@ def _send_closing_frame(self, ignore_send_errors=False): if self.version == 76 and not self.websocket_closed: try: self.socket.sendall(b"\xff\x00") - except SocketError: + except OSError: # Sometimes, like when the remote side cuts off the connection, # we don't care about this. if not ignore_send_errors: # pragma NO COVER @@ -488,7 +487,7 @@ def close(self): try: self._send_closing_frame(True) self.socket.shutdown(True) - except SocketError as e: + except OSError as e: if e.errno != errno.ENOTCONN: self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e)) finally: @@ -501,7 +500,7 @@ class ConnectionClosedError(Exception): class FailedConnectionError(Exception): def __init__(self, status, message): - super(FailedConnectionError, self).__init__(status, message) + super().__init__(status, message) self.message = message self.status = status @@ -513,7 +512,7 @@ class ProtocolError(ValueError): class RFC6455WebSocket(WebSocket): def __init__(self, sock, environ, version=13, protocol=None, client=False, extensions=None, max_frame_length=DEFAULT_MAX_FRAME_LENGTH): - super(RFC6455WebSocket, self).__init__(sock, environ, version) + super().__init__(sock, environ, version) self.iterator = self._iter_frames() self.client = client self.protocol = protocol @@ -524,7 +523,7 @@ def __init__(self, sock, environ, version=13, protocol=None, client=False, exten self.max_frame_length = max_frame_length self._remote_close_data = None - class UTF8Decoder(object): + class UTF8Decoder: def __init__(self): if utf8validator: self.validator = utf8validator.Utf8Validator() @@ -593,7 +592,7 @@ def _get_bytes(self, numbytes): data = data + d return data - class Message(object): + class Message: def __init__(self, opcode, max_frame_length, decoder=None, 
decompressor=None): self.decoder = decoder self.data = [] @@ -624,7 +623,7 @@ def _apply_mask(data, mask, length=None, offset=0): if length is None: length = len(data) cnt = range(length) - return b''.join(six.int2byte(six.indexbytes(data, i) ^ mask[(offset + i) % 4]) for i in cnt) + return b''.join(bytes((data[i] ^ mask[(offset + i) % 4],)) for i in cnt) def _handle_control_frame(self, opcode, data): if opcode == 8: # connection close @@ -760,7 +759,7 @@ def _recv_frame(self, message=None): def _pack_message(self, message, masked=False, continuation=False, final=True, control_code=None): is_text = False - if isinstance(message, six.text_type): + if isinstance(message, str): message = message.encode('utf-8') is_text = True @@ -809,7 +808,7 @@ def _pack_message(self, message, masked=False, # NOTE: RFC6455 states: # A server MUST NOT mask any frames that it sends to the client rand = Random(time.time()) - mask = [rand.getrandbits(8) for _ in six.moves.xrange(4)] + mask = [rand.getrandbits(8) for _ in range(4)] message = RFC6455WebSocket._apply_mask(message, mask, length) maskdata = struct.pack('!BBBB', *mask) else: @@ -837,14 +836,14 @@ def _send_closing_frame(self, ignore_send_errors=False, close_data=None): if self.version in (8, 13) and not self.websocket_closed: if close_data is not None: status, msg = close_data - if isinstance(msg, six.text_type): + if isinstance(msg, str): msg = msg.encode('utf-8') data = struct.pack('!H', status) + msg else: data = '' try: self.send(data, control_code=8) - except SocketError: + except OSError: # Sometimes, like when the remote side cuts off the connection, # we don't care about this. 
if not ignore_send_errors: # pragma NO COVER @@ -857,7 +856,7 @@ def close(self, close_data=None): try: self._send_closing_frame(close_data=close_data, ignore_send_errors=True) self.socket.shutdown(socket.SHUT_WR) - except SocketError as e: + except OSError as e: if e.errno != errno.ENOTCONN: self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e)) finally: diff --git a/eventlet/wsgi.py b/eventlet/wsgi.py index b2c8d2809f..a9b39b90ca 100644 --- a/eventlet/wsgi.py +++ b/eventlet/wsgi.py @@ -4,6 +4,7 @@ import time import traceback import types +import urllib.parse import warnings import eventlet @@ -12,8 +13,6 @@ from eventlet.corolocal import local from eventlet.green import BaseHTTPServer from eventlet.green import socket -import six -from six.moves import urllib DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024 @@ -62,8 +61,8 @@ def addr_to_host_port(addr): # Collections of error codes to compare against. Not all attributes are set # on errno module on all platforms, so some are literals :( -BAD_SOCK = set((errno.EBADF, 10053)) -BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET)) +BAD_SOCK = {errno.EBADF, 10053} +BROKEN_SOCK = {errno.EPIPE, errno.ECONNRESET} class ChunkReadError(ValueError): @@ -73,7 +72,7 @@ class ChunkReadError(ValueError): WSGI_LOCAL = local() -class Input(object): +class Input: def __init__(self, rfile, @@ -123,7 +122,7 @@ def send_hundred_continue_response(self): if self.hundred_continue_headers is not None: # 100 Continue headers for header in self.hundred_continue_headers: - towrite.append(six.b('%s: %s\r\n' % header)) + towrite.append(('%s: %s\r\n' % header).encode()) # Blank line towrite.append(b'\r\n') @@ -182,7 +181,7 @@ def _chunked_read(self, rfile, length=None, use_readline=False): data = reader(maxreadlen) if not data: self.chunk_length = 0 - raise IOError("unexpected end of file while parsing chunked data") + raise OSError("unexpected end of file while parsing chunked data") datalen = len(data) 
response.append(data) @@ -272,7 +271,7 @@ def get_logger(log, debug): return LoggerFileWrapper(log or sys.stderr, debug) -class LoggerNull(object): +class LoggerNull: def __init__(self): pass @@ -311,7 +310,7 @@ def write(self, msg, *args): self.log.write(msg) -class FileObjectForHeaders(object): +class FileObjectForHeaders: def __init__(self, fp): self.fp = fp @@ -360,7 +359,7 @@ def setup(self): if getattr(socket, 'TCP_QUICKACK', None): try: conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, True) - except socket.error: + except OSError: pass try: @@ -374,7 +373,7 @@ def setup(self): else: # it's a SSLObject, or a martian raise NotImplementedError( - '''eventlet.wsgi doesn't support sockets of type {0}'''.format(type(conn))) + '''eventlet.wsgi doesn't support sockets of type {}'''.format(type(conn))) def handle(self): self.close_connection = True @@ -392,7 +391,7 @@ def _read_request_line(self): return '' try: - sock = self.rfile._sock if six.PY2 else self.connection + sock = self.connection if self.server.keepalive and not isinstance(self.server.keepalive, bool): sock.settimeout(self.server.keepalive) line = self.rfile.readline(self.server.url_length_limit) @@ -400,10 +399,10 @@ def _read_request_line(self): return line except greenio.SSL.ZeroReturnError: pass - except socket.error as e: + except OSError as e: last_errno = support.get_errno(e) if last_errno in BROKEN_SOCK: - self.server.log.debug('({0}) connection reset by peer {1!r}'.format( + self.server.log.debug('({}) connection reset by peer {!r}'.format( self.server.pid, self.client_address)) elif last_errno not in BAD_SOCK: @@ -462,7 +461,7 @@ def handle_one_request(self): self.server.outstanding_requests += 1 try: self.handle_one_response() - except socket.error as e: + except OSError as e: # Broken pipe, connection reset by peer if support.get_errno(e) not in BROKEN_SOCK: raise @@ -493,13 +492,13 @@ def write(data): status, response_headers = headers_set headers_sent.append(1) header_list = 
[header[0].lower() for header in response_headers] - towrite.append(six.b('%s %s\r\n' % (self.protocol_version, status))) + towrite.append(('%s %s\r\n' % (self.protocol_version, status)).encode()) for header in response_headers: - towrite.append(six.b('%s: %s\r\n' % header)) + towrite.append(('%s: %s\r\n' % header).encode('latin-1')) # send Date header? if 'date' not in header_list: - towrite.append(six.b('Date: %s\r\n' % (format_date_time(time.time()),))) + towrite.append(('Date: %s\r\n' % (format_date_time(time.time()),)).encode()) client_conn = self.headers.get('Connection', '').lower() send_keep_alive = False @@ -535,7 +534,7 @@ def write(data): if use_chunked[0]: # Write the chunked encoding - towrite.append(six.b("%x" % (len(data),)) + b"\r\n" + data + b"\r\n") + towrite.append(("%x" % (len(data),)).encode() + b"\r\n" + data + b"\r\n") else: towrite.append(data) wfile.writelines(towrite) @@ -548,7 +547,7 @@ def start_response(status, response_headers, exc_info=None): try: if headers_sent: # Re-raise original exception if headers sent - six.reraise(exc_info[0], exc_info[1], exc_info[2]) + raise exc_info[1].with_traceback(exc_info[2]) finally: # Avoid dangling circular ref exc_info = None @@ -558,12 +557,8 @@ def start_response(status, response_headers, exc_info=None): # Per HTTP RFC standard, header name is case-insensitive. # Please, fix your client to ignore header case if possible. 
if self.capitalize_response_headers: - if six.PY2: - def cap(x): - return x.capitalize() - else: - def cap(x): - return x.encode('latin1').capitalize().decode('latin1') + def cap(x): + return x.encode('latin1').capitalize().decode('latin1') response_headers = [ ('-'.join([cap(x) for x in key.split('-')]), value) @@ -596,7 +591,7 @@ def cap(x): for data in result: if len(data) == 0: continue - if isinstance(data, six.text_type): + if isinstance(data, str): data = data.encode('ascii') towrite.append(data) @@ -619,7 +614,7 @@ def cap(x): tb = traceback.format_exc() self.server.log.info(tb) if not headers_sent: - err_body = six.b(tb) if self.server.debug else b'' + err_body = tb.encode() if self.server.debug else b'' start_response("500 Internal Server Error", [('Content-type', 'text/plain'), ('Content-length', len(err_body))]) @@ -652,7 +647,7 @@ def cap(x): + ' client={0} request="{1}" error="{2}"').format( self.get_client_address()[0], self.requestline, e, )) - except IOError as e: + except OSError as e: self.close_connection = 1 self.server.log.error(( 'I/O error while discarding request body.' 
@@ -693,10 +688,7 @@ def get_environ(self): pq = self.path.split('?', 1) env['RAW_PATH_INFO'] = pq[0] - if six.PY2: - env['PATH_INFO'] = urllib.parse.unquote(pq[0]) - else: - env['PATH_INFO'] = urllib.parse.unquote(pq[0], encoding='latin1') + env['PATH_INFO'] = urllib.parse.unquote(pq[0], encoding='latin1') if len(pq) > 1: env['QUERY_STRING'] = pq[1] @@ -758,7 +750,7 @@ def get_environ(self): def finish(self): try: BaseHTTPServer.BaseHTTPRequestHandler.finish(self) - except socket.error as e: + except OSError as e: # Broken pipe, connection reset by peer if support.get_errno(e) not in BROKEN_SOCK: raise @@ -848,7 +840,7 @@ def process_request(self, conn_state): # Expected exceptions are not exceptional conn_state[1].close() # similar to logging "accepted" in server() - self.log.debug('({0}) timed out {1!r}'.format(self.pid, conn_state[0])) + self.log.debug('({}) timed out {!r}'.format(self.pid, conn_state[0])) def log_message(self, message): raise AttributeError('''\ @@ -865,11 +857,11 @@ def log_message(self, message): try: import ssl ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError) - ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET, - ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL)) + ACCEPT_ERRNO = {errno.EPIPE, errno.EBADF, errno.ECONNRESET, + ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL} except ImportError: ACCEPT_EXCEPTIONS = (socket.error,) - ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET)) + ACCEPT_ERRNO = {errno.EPIPE, errno.EBADF, errno.ECONNRESET} def socket_repr(sock): @@ -879,9 +871,9 @@ def socket_repr(sock): name = sock.getsockname() if sock.family == socket.AF_INET: - hier_part = '//{0}:{1}'.format(*name) + hier_part = '//{}:{}'.format(*name) elif sock.family == socket.AF_INET6: - hier_part = '//[{0}]:{1}'.format(*name[:2]) + hier_part = '//[{}]:{}'.format(*name[:2]) elif sock.family == socket.AF_UNIX: hier_part = name else: @@ -1006,12 +998,12 @@ def _clean_connection(_, conn): conn[1].close() try: - serv.log.info('({0}) wsgi starting 
up on {1}'.format(serv.pid, socket_repr(sock))) + serv.log.info('({}) wsgi starting up on {}'.format(serv.pid, socket_repr(sock))) while is_accepting: try: client_socket, client_addr = sock.accept() client_socket.settimeout(serv.socket_timeout) - serv.log.debug('({0}) accepted {1!r}'.format(serv.pid, client_addr)) + serv.log.debug('({}) accepted {!r}'.format(serv.pid, client_addr)) connections[client_addr] = connection = [client_addr, client_socket, STATE_IDLE] (pool.spawn(serv.process_request, connection) .link(_clean_connection, connection)) @@ -1022,13 +1014,13 @@ def _clean_connection(_, conn): serv.log.info('wsgi exiting') break finally: - for cs in six.itervalues(connections): + for cs in connections.values(): prev_state = cs[2] cs[2] = STATE_CLOSE if prev_state == STATE_IDLE: greenio.shutdown_safe(cs[1]) pool.waitall() - serv.log.info('({0}) wsgi exited, is_accepting={1}'.format(serv.pid, is_accepting)) + serv.log.info('({}) wsgi exited, is_accepting={}'.format(serv.pid, is_accepting)) try: # NOTE: It's not clear whether we want this to leave the # socket open or close it. Use cases like Spawning want @@ -1036,6 +1028,6 @@ def _clean_connection(_, conn): # that far we might as well not bother closing sock at # all. 
sock.close() - except socket.error as e: + except OSError as e: if support.get_errno(e) not in BROKEN_SOCK: traceback.print_exc() diff --git a/eventlet/zipkin/api.py b/eventlet/zipkin/api.py index cd03ec0870..8a33a4b2b6 100644 --- a/eventlet/zipkin/api.py +++ b/eventlet/zipkin/api.py @@ -89,7 +89,7 @@ def generate_span_id(): return _uniq_id() -class TraceData(object): +class TraceData: END_ANNOTATION = SERVER_SEND diff --git a/eventlet/zipkin/client.py b/eventlet/zipkin/client.py index 3e629be300..faff244e25 100644 --- a/eventlet/zipkin/client.py +++ b/eventlet/zipkin/client.py @@ -11,7 +11,7 @@ CATEGORY = 'zipkin' -class ZipkinClient(object): +class ZipkinClient: def __init__(self, host='127.0.0.1', port=9410): """ diff --git a/eventlet/zipkin/http.py b/eventlet/zipkin/http.py index 3ab59259a4..f981a171c0 100644 --- a/eventlet/zipkin/http.py +++ b/eventlet/zipkin/http.py @@ -1,6 +1,5 @@ import warnings -import six from eventlet.green import httplib from eventlet.zipkin import api @@ -12,45 +11,14 @@ HDR_SAMPLED = 'X-B3-Sampled' -if six.PY2: - __org_endheaders__ = httplib.HTTPConnection.endheaders - __org_begin__ = httplib.HTTPResponse.begin - - def _patched_endheaders(self): - if api.is_tracing(): - trace_data = api.get_trace_data() - new_span_id = api.generate_span_id() - self.putheader(HDR_TRACE_ID, hex_str(trace_data.trace_id)) - self.putheader(HDR_SPAN_ID, hex_str(new_span_id)) - self.putheader(HDR_PARENT_SPAN_ID, hex_str(trace_data.span_id)) - self.putheader(HDR_SAMPLED, int(trace_data.sampled)) - api.put_annotation('Client Send') - - __org_endheaders__(self) - - def _patched_begin(self): - __org_begin__(self) - - if api.is_tracing(): - api.put_annotation('Client Recv (%s)' % self.status) - - def patch(): - if six.PY2: - httplib.HTTPConnection.endheaders = _patched_endheaders - httplib.HTTPResponse.begin = _patched_begin - if six.PY3: - warnings.warn("Since current Python thrift release \ + warnings.warn("Since current Python thrift release \ doesn't support 
Python 3, eventlet.zipkin.http \ doesn't also support Python 3 (http.client)") def unpatch(): - if six.PY2: - httplib.HTTPConnection.endheaders = __org_endheaders__ - httplib.HTTPResponse.begin = __org_begin__ - if six.PY3: - pass + pass def hex_str(n): diff --git a/eventlet/zipkin/wsgi.py b/eventlet/zipkin/wsgi.py index 3d52911062..402d1428dc 100644 --- a/eventlet/zipkin/wsgi.py +++ b/eventlet/zipkin/wsgi.py @@ -40,7 +40,7 @@ def _patched_handle_one_response(self): api.put_annotation(SERVER_SEND) -class Sampler(object): +class Sampler: def __init__(self, sampling_rate): self.sampling_rate = sampling_rate From 1c6f60b3d7fa2432749a294db25aa1ac75eeaaa1 Mon Sep 17 00:00:00 2001 From: Eli Schwartz Date: Fri, 22 Dec 2023 13:22:45 -0500 Subject: [PATCH 27/35] Drop old code based on python < 3.7, part 2: everything other than eventlet/ (#858) * inline version comparison checks Assigning global constants for sys.version_info comparisons makes it hard to run automatic migrations. * automatically upgrade code to drop python2-specific Klass(object) part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP004 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific super(), part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP008 --fix --unsafe-fixes ``` fixing a couple of resulting pycodestyle indent issues, and committing the results. * automatically upgrade code to drop python2-specific coding cookie part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP009 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific future import part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP010 --fix ``` and committing the results. 
* automatically upgrade code to drop python2-specific exceptions, part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP024 --fix ``` and committing the results. * automatically upgrade code to drop python2 format placeholders, part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP030 --fix --unsafe-fixes ``` and committing the results. * automatically upgrade code to drop python2-specific imports, part 1 Ported by running the following command: ``` ruff check eventlet/ --select UP035 --fix ``` and realizing that it was already try/excepted. The automatic rewriter doesn't notice the difference, but we do. Manually fix this up, and commit it. * automatically upgrade remaining code to drop python2-specific logic, part 1 Ported by running the following command: ``` find eventlet -name '*.py' -exec pyupgrade --py3-only --keep-percent-format {} + ``` fixing a couple of resulting pycodestyle indent/spacing issues, and committing the results. Touch only the primary project code for now. Clean up the tests etc. later. This covers a few different types of fixes: - collapse various six imports to their canonical python3 location - elide or collapse various six.PY2 / six.PY3 conditional code to unconditionally, exclusively, run the latter - catch some OSError conversions that ruff did not catch, because of `__import__()` - rewrite set/dict to literals - directly use dict methods (.keys, .items, ...) instead of py2-wrapper .iter - mark strings as raw if they have invalid escape sequences * manually clean up a lot of remaining "import six" code, part 1 Touch only the eventlet/ directory. Manual cleanups to follow. 
Variously: - simply drop an unused import - convert six.StringIO to io.StringIO - convert six.b() to encoding string -> bytes - collapsing six.moves or six.PY2 logic too complex for the rewriter * Drop remaining python < 3.7 compatible legacy code, part 1 Automatically migrated by running: ``` ruff check eventlet/ --select UP036 --fix --unsafe-fixes ``` and committing the results. Touch only eventlet/ for now. This implements one fixer: - code guarded by sys.version_info conditional on a target python of py37 * automatically upgrade code to drop python2-specific Klass(object) part 2 Ported by running the following command: ``` ruff check . --select UP004 --fix ``` and committing the results. * automatically upgrade code to drop python2.6-specific unittest method Ported by running the following command: ``` ruff check . --select UP005 --fix ``` fixing a couple of resulting pycodestyle indent issues, and committing the results. * automatically upgrade code to drop python2-specific super(), part 2 Ported by running the following command: ``` ruff check . --select UP008 --fix --unsafe-fixes ``` and committing the results. * automatically upgrade code to drop python2-specific coding cookie part 2 Ported by running the following command: ``` ruff check . --select UP009 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific future import part 2 Ported by running the following command: ``` ruff check . --select UP010 --fix ``` and committing the results. * automatically upgrade code to drop python2 encode of string literal Ported by running the following command: ``` ruff check . --select UP012 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific open mode Ported by running the following command: ``` ruff check . --select UP015 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific io.open alias Ported by running the following command: ``` ruff check . 
--select UP020 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific exceptions, part 2 Ported by running the following command: ``` ruff check . --select UP024 --fix ``` fixing a couple of resulting pycodestyle indent/spacing issues, and committing the results. * automatically upgrade code to drop python2-specific u string prefix Ported by running the following command: ``` ruff check . --select UP025 --fix ``` and committing the results. * automatically upgrade code to drop python2-specific yield loop Ported by running the following command: ``` ruff check . --select UP028 --fix ``` and committing the results. * automatically upgrade code to drop python2 format placeholders, part 2 Ported by running the following command: ``` ruff check . --select UP030 --fix --unsafe-fixes ``` and committing the results. * automatically upgrade remaining code to drop python2-specific logic, part 2 Ported by running the following command: ``` find . -name '*.py' -exec pyupgrade --py3-only --keep-percent-format {} + ``` fixing a couple of resulting pycodestyle indent/spacing issues, and committing the results. Clean up the rest of the non eventlet/ code. This covers a few different types of fixes: - collapse various six imports to their canonical python3 location - elide or collapse various six.PY2 / six.PY3 conditional code to unconditionally, exclusively, run the latter - catch some OSError conversions that ruff did not catch, because of `__import__()` - rewrite set/dict to literals - directly use dict methods (.keys, .items, ...) instead of py2-wrapper .iter - mark strings as raw if they have invalid escape sequences * manually clean up a lot of remaining "import six" code, part 2 Clean up all remaining code outside of eventlet/ itself. 
Variously: - simply drop an unused import - convert six.StringIO to io.StringIO - convert six.b() to encoding string -> bytes - collapsing six.moves or six.PY2 logic too complex for the rewriter * Drop remaining python < 3.7 compatible legacy code, part 2 Automatically migrated by running: ``` ruff check . --select UP036 --fix --unsafe-fixes ``` and committing the results. Clean up all remaining code outside of eventlet/ itself. This implements one fixer: - code guarded by sys.version_info conditional on a target python of py37 * Restore to correct code (matching the version in stdlib) --------- Co-authored-by: Itamar Turner-Trauring Co-authored-by: Itamar Turner-Trauring --- .coveragerc | 1 - benchmarks/__init__.py | 10 +- benchmarks/context.py | 1 - benchmarks/hub_timers.py | 3 +- benchmarks/localhost_socket.py | 10 +- doc/conf.py | 9 +- examples/chat_server.py | 2 +- examples/connect.py | 1 - examples/distributed_websocket_chat.py | 2 +- examples/echoserver.py | 1 - examples/websocket.py | 3 +- examples/zmq_chat.py | 2 +- pyproject.toml | 1 - tests/__init__.py | 34 +++-- tests/api_test.py | 4 +- tests/convenience_test.py | 11 +- tests/dagpool_test.py | 51 ++++---- tests/db_pool_test.py | 38 +++--- tests/debug_test.py | 22 ++-- tests/green_http_test.py | 3 +- tests/greendns_test.py | 53 ++++---- tests/greenio_test.py | 23 ++-- tests/greenpool_test.py | 19 ++- tests/greenthread_test.py | 6 +- tests/hub_test.py | 11 +- tests/isolated/green_ssl_py36_properties.py | 2 +- tests/isolated/hub_fork.py | 2 +- tests/isolated/hub_kqueue_unsupported.py | 1 - tests/isolated/hub_use_hub_class.py | 3 +- tests/isolated/mysqldb_monkey_patch.py | 2 +- ...her_blocking_select_methods_are_deleted.py | 21 ++-- tests/isolated/patcher_builtin.py | 2 +- .../patcher_fork_after_monkey_patch.py | 5 +- .../patcher_import_patched_defaults.py | 2 +- tests/isolated/patcher_open_kwargs.py | 2 +- .../patcher_socketserver_selectors.py | 2 +- tests/isolated/patcher_threading_current.py | 2 +- 
tests/isolated/patcher_threadpoolexecutor.py | 11 +- tests/isolated/regular_file_readall.py | 18 ++- tests/isolated/socket_resolve_green.py | 8 +- tests/isolated/tpool_exception_leak.py | 2 +- tests/isolated/wsgi_connection_timeout.py | 10 +- tests/mock.py | 53 ++++---- tests/mysqldb_test.py | 6 +- tests/os_test.py | 14 +-- tests/parse_results.py | 4 +- tests/patcher_psycopg_test.py | 4 +- tests/patcher_test.py | 15 +-- tests/pools_test.py | 9 +- tests/socket_test.py | 4 +- tests/ssl_test.py | 9 +- tests/test__greenness.py | 10 +- tests/test__refcount.py | 2 +- tests/test__socket_errors.py | 8 +- tests/thread_test.py | 9 +- tests/timeout_with_statement_test.py | 4 +- tests/tpool_test.py | 16 ++- tests/websocket_new_test.py | 69 +++++------ tests/websocket_test.py | 68 +++++----- tests/wsgi_test.py | 116 +++++++++--------- tests/zmq_test.py | 8 +- 61 files changed, 382 insertions(+), 462 deletions(-) diff --git a/.coveragerc b/.coveragerc index 0c429f8d22..f2b4081d78 100644 --- a/.coveragerc +++ b/.coveragerc @@ -5,7 +5,6 @@ source = eventlet #concurrency = eventlet omit = eventlet/support/dns/* - eventlet/support/six.py tests/* [report] diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py index c590a0bf55..daf56a78e1 100644 --- a/benchmarks/__init__.py +++ b/benchmarks/__init__.py @@ -1,4 +1,3 @@ -from __future__ import print_function import argparse import gc import importlib @@ -10,7 +9,6 @@ import timeit import eventlet -import six # legacy, TODO convert context/localhost_socket benchmarks to new way @@ -19,7 +17,7 @@ def measure_best(repeat, iters, common_cleanup='pass', *funcs): funcs = list(funcs) - results = dict((f, []) for f in funcs) + results = {f: [] for f in funcs} for _ in range(repeat): random.shuffle(funcs) @@ -30,7 +28,7 @@ def measure_best(repeat, iters, common_cleanup() best_results = {} - for func, times in six.iteritems(results): + for func, times in results.items(): best_results[func] = min(times) return best_results @@ -44,13 +42,13 
@@ class Benchmark: mb_per_s = 0 def __init__(self, **kwargs): - for k, v in six.iteritems(kwargs): + for k, v in kwargs.items(): if not hasattr(self, k): raise AttributeError(k) setattr(self, k, v) def __str__(self): - kvs = ', '.join('{}={}'.format(k, v) for k, v in six.iteritems(self.__dict__) if not k.startswith('_')) + kvs = ', '.join('{}={}'.format(k, v) for k, v in self.__dict__.items() if not k.startswith('_')) return 'Benchmark<{}>'.format(kvs) __repr__ = __str__ diff --git a/benchmarks/context.py b/benchmarks/context.py index dd3ac9a524..066fc393b1 100644 --- a/benchmarks/context.py +++ b/benchmarks/context.py @@ -1,5 +1,4 @@ """Test context switching performance of threading and eventlet""" -from __future__ import print_function import threading import time diff --git a/benchmarks/hub_timers.py b/benchmarks/hub_timers.py index 38780181df..27d298d455 100644 --- a/benchmarks/hub_timers.py +++ b/benchmarks/hub_timers.py @@ -5,7 +5,6 @@ import benchmarks from eventlet.hubs import timer, get_hub -import six l = [] @@ -19,7 +18,7 @@ def work(n): @contextlib.contextmanager def setup(iters): l[:] = [] - timeouts = [random.uniform(0, 10) for x in six.moves.range(iters)] + timeouts = [random.uniform(0, 10) for x in range(iters)] yield timeouts diff --git a/benchmarks/localhost_socket.py b/benchmarks/localhost_socket.py index 85401d2003..05b39e3f5a 100644 --- a/benchmarks/localhost_socket.py +++ b/benchmarks/localhost_socket.py @@ -1,10 +1,8 @@ """Benchmark evaluating eventlet's performance at speaking to itself over a localhost socket.""" -from __future__ import print_function import time import benchmarks -import six BYTES = 1000 @@ -31,13 +29,13 @@ def writer(addr, socket_impl): def green_accepter(server_sock, pool): - for i in six.moves.range(CONCURRENCY): + for i in range(CONCURRENCY): sock, addr = server_sock.accept() pool.spawn_n(reader, sock) def heavy_accepter(server_sock, pool): - for i in six.moves.range(CONCURRENCY): + for i in range(CONCURRENCY): sock, 
addr = server_sock.accept() t = threading.Thread(None, reader, "reader thread", (sock,)) t.start() @@ -58,7 +56,7 @@ def launch_green_threads(): server_sock.listen(50) addr = ('localhost', server_sock.getsockname()[1]) pool.spawn_n(green_accepter, server_sock, pool) - for i in six.moves.range(CONCURRENCY): + for i in range(CONCURRENCY): pool.spawn_n(writer, addr, eventlet.green.socket.socket) pool.waitall() @@ -77,7 +75,7 @@ def launch_heavy_threads(): None, heavy_accepter, "accepter thread", (server_sock, threads)) accepter_thread.start() threads.append(accepter_thread) - for i in six.moves.range(CONCURRENCY): + for i in range(CONCURRENCY): client_thread = threading.Thread(None, writer, "writer thread", (addr, socket.socket)) client_thread.start() threads.append(client_thread) diff --git a/doc/conf.py b/doc/conf.py index 37d2f816b6..52fa38140f 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Eventlet documentation build configuration file, created by # sphinx-quickstart on Sat Jul 4 19:48:27 2009. @@ -42,8 +41,8 @@ master_doc = 'index' # General information about the project. -project = u'Eventlet' -copyright = u'2005-2010, Eventlet Contributors' +project = 'Eventlet' +copyright = '2005-2010, Eventlet Contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -181,8 +180,8 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ - ('index', 'Eventlet.tex', u'Eventlet Documentation', - u'', 'manual'), + ('index', 'Eventlet.tex', 'Eventlet Documentation', + '', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/examples/chat_server.py b/examples/chat_server.py index 77f09245e7..fda681003f 100644 --- a/examples/chat_server.py +++ b/examples/chat_server.py @@ -14,7 +14,7 @@ def read_chat_forever(writer, reader): if p is not writer: # Don't echo p.write(line) p.flush() - except socket.error as e: + except OSError as e: # ignore broken pipes, they just mean the participant # closed its connection already if e[0] != 32: diff --git a/examples/connect.py b/examples/connect.py index dc2c6d23b9..e5eeb2e61c 100644 --- a/examples/connect.py +++ b/examples/connect.py @@ -2,7 +2,6 @@ Demonstrates how to use the eventlet.green.socket module. """ -from __future__ import print_function import eventlet from eventlet.green import socket diff --git a/examples/distributed_websocket_chat.py b/examples/distributed_websocket_chat.py index 526f3abc04..f815a1ec3a 100644 --- a/examples/distributed_websocket_chat.py +++ b/examples/distributed_websocket_chat.py @@ -28,7 +28,7 @@ ctx = zmq.Context() -class IDName(object): +class IDName: def __init__(self): self.id = uuid1() diff --git a/examples/echoserver.py b/examples/echoserver.py index 33927fd3bb..f3b032e2f1 100644 --- a/examples/echoserver.py +++ b/examples/echoserver.py @@ -9,7 +9,6 @@ You terminate your connection by terminating telnet (typically Ctrl-] and then 'quit') """ -from __future__ import print_function import eventlet diff --git a/examples/websocket.py b/examples/websocket.py index d26e248258..3594dbe662 100644 --- a/examples/websocket.py +++ b/examples/websocket.py @@ -1,7 +1,6 @@ import eventlet from eventlet import wsgi from eventlet import websocket -import six # demo app import os @@ -20,7 +19,7 @@ def handle(ws): ws.send(m) elif ws.path == '/data': - for i in 
six.moves.range(10000): + for i in range(10000): ws.send("0 %s %s\n" % (i, random.random())) eventlet.sleep(0.1) diff --git a/examples/zmq_chat.py b/examples/zmq_chat.py index c24e16167f..13dcea97ed 100644 --- a/examples/zmq_chat.py +++ b/examples/zmq_chat.py @@ -39,7 +39,7 @@ def read_chat_forever(reader, pub_socket): try: pub_socket.send_pyobj((who, line)) - except socket.error as e: + except OSError as e: # ignore broken pipes, they just mean the participant # closed its connection already if e[0] != 32: diff --git a/pyproject.toml b/pyproject.toml index 7be6115e79..6f152287bf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,7 +41,6 @@ dependencies = [ 'dnspython >= 1.15.0', 'greenlet >= 1.0', 'monotonic >= 1.4;python_version<"3.5"', - 'six >= 1.10.0', ] [project.urls] diff --git a/tests/__init__.py b/tests/__init__.py index 1f3a2136ab..525fa42839 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,10 +1,10 @@ # package is named tests, not test, so it won't be confused with test in stdlib -from __future__ import print_function import contextlib import errno import functools import gc +import io import json import os try: @@ -12,10 +12,7 @@ except ImportError: resource = None import signal -try: - import subprocess32 as subprocess # py2 -except ImportError: - import subprocess # py3 +import subprocess import sys import unittest import warnings @@ -24,7 +21,6 @@ import eventlet from eventlet import tpool -import six import socket from threading import Thread import struct @@ -46,7 +42,7 @@ def assert_raises(exc_type): name = exc_type.__name__ except AttributeError: pass - assert False, 'Expected exception {0}'.format(name) + assert False, 'Expected exception {}'.format(name) def skipped(func, *decorator_args): @@ -258,7 +254,7 @@ def find_command(command): p = os.path.join(dir, command) if os.access(p, os.X_OK): return p - raise IOError(errno.ENOENT, 'Command not found: %r' % command) + raise OSError(errno.ENOENT, 'Command not found: %r' % command) 
def silence_warnings(func): @@ -301,19 +297,17 @@ def get_database_auth(): # Have to convert unicode objects to str objects because # mysqldb is dumb. Using a doubly-nested list comprehension # because we know that the structure is a two-level dict. - return dict( - [(str(modname), dict( - [(str(k), str(v)) for k, v in connectargs.items()])) - for modname, connectargs in auth_utf8.items()]) - except IOError: + return { + str(modname): { + str(k): str(v) for k, v in connectargs.items()} + for modname, connectargs in auth_utf8.items()} + except OSError: pass return retval def run_python(path, env=None, args=None, timeout=None, pythonpath_extend=None, expect_pass=False): new_argv = [sys.executable] - if sys.version_info[:2] <= (2, 7): - new_argv += ['-W', 'ignore:Python 2 is no longer supported'] new_env = os.environ.copy() new_env.setdefault('eventlet_test_in_progress', 'yes') src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -346,9 +340,9 @@ def run_python(path, env=None, args=None, timeout=None, pythonpath_extend=None, p.kill() output, _ = p.communicate(timeout=timeout) if expect_pass: - sys.stderr.write('Program {0} output:\n---\n{1}\n---\n'.format(path, output.decode())) + sys.stderr.write('Program {} output:\n---\n{}\n---\n'.format(path, output.decode())) assert False, 'timed out' - return '{0}\nFAIL - timed out'.format(output).encode() + return '{}\nFAIL - timed out'.format(output).encode() if expect_pass: if output.startswith(b'skip'): @@ -360,7 +354,7 @@ def run_python(path, env=None, args=None, timeout=None, pythonpath_extend=None, lines = output.splitlines() ok = lines[-1].rstrip() == b'pass' if not ok or len(lines) > 1: - sys.stderr.write('Program {0} output:\n---\n{1}\n---\n'.format(path, output.decode())) + sys.stderr.write('Program {} output:\n---\n{}\n---\n'.format(path, output.decode())) assert ok, 'Expected single line "pass" in stdout' return output @@ -373,12 +367,12 @@ def run_isolated(path, prefix='tests/isolated/', 
**kwargs): def check_is_timeout(obj): value_text = getattr(obj, 'is_timeout', '(missing)') - assert eventlet.is_timeout(obj), 'type={0} str={1} .is_timeout={2}'.format(type(obj), str(obj), value_text) + assert eventlet.is_timeout(obj), 'type={} str={} .is_timeout={}'.format(type(obj), str(obj), value_text) @contextlib.contextmanager def capture_stderr(): - stream = six.StringIO() + stream = io.StringIO() original = sys.stderr try: sys.stderr = stream diff --git a/tests/api_test.py b/tests/api_test.py index 3ebc67cb26..d285268248 100644 --- a/tests/api_test.py +++ b/tests/api_test.py @@ -166,13 +166,13 @@ def func(): try: eventlet.with_timeout(0.1, func) - self.fail(u'Expected Timeout') + self.fail('Expected Timeout') except eventlet.Timeout: pass def test_wrap_is_timeout(): - class A(object): + class A: pass obj = eventlet.wrap_is_timeout(A)() diff --git a/tests/convenience_test.py b/tests/convenience_test.py index 68dc63ea68..8d28646988 100644 --- a/tests/convenience_test.py +++ b/tests/convenience_test.py @@ -4,7 +4,6 @@ import eventlet from eventlet import convenience, debug from eventlet.green import socket -import six import tests import tests.mock @@ -15,11 +14,11 @@ class TestServe(tests.LimitedTestCase): def setUp(self): - super(TestServe, self).setUp() + super().setUp() debug.hub_exceptions(False) def tearDown(self): - super(TestServe, self).tearDown() + super().tearDown() debug.hub_exceptions(True) def test_exiting_server(self): @@ -68,7 +67,7 @@ def counter(sock, addr): hits[0] += 1 l = eventlet.listen(('localhost', 0)) gt = eventlet.spawn(eventlet.serve, l, counter) - for i in six.moves.range(100): + for i in range(100): client = eventlet.connect(('localhost', l.getsockname()[1])) self.assertFalse(client.recv(100)) gt.kill() @@ -143,7 +142,7 @@ def test_socket_reuse(): lsock1.close() try: lsock1 = eventlet.listen(addr) - except socket.error as e: + except OSError as e: errors.append(e) continue break @@ -157,7 +156,7 @@ def test_socket_reuse(): lsock2 = 
eventlet.listen(addr) assert lsock2 lsock2.close() - except socket.error: + except OSError: pass lsock1.close() diff --git a/tests/dagpool_test.py b/tests/dagpool_test.py index 0b4880dbfe..f08b92087b 100644 --- a/tests/dagpool_test.py +++ b/tests/dagpool_test.py @@ -7,7 +7,6 @@ import eventlet from eventlet.dagpool import DAGPool, Collision, PropagateError -import six from contextlib import contextmanager import itertools @@ -30,12 +29,12 @@ def assert_raises(exc): except exc: pass else: - raise AssertionError("failed to raise expected exception {0}" + raise AssertionError("failed to raise expected exception {}" .format(exc.__class__.__name__)) def assert_in(sought, container): - assert sought in container, "{0} not in {1}".format(sought, container) + assert sought in container, "{} not in {}".format(sought, container) # **************************************************************************** @@ -94,7 +93,7 @@ def check_no_suspend(): assert counter is not None, "Use 'with suspend_checker():' to enable check_no_suspend()" current = counter yield - assert counter == current, "Operation suspended {0} times".format(counter - current) + assert counter == current, "Operation suspended {} times".format(counter - current) def test_check_no_suspend(): @@ -117,7 +116,7 @@ def test_check_no_suspend(): # **************************************************************************** # Verify that the expected things happened in the expected order # **************************************************************************** -class Capture(object): +class Capture: """ This class is intended to capture a sequence (of string messages) to verify that all expected events occurred, and in the expected order. The @@ -152,19 +151,19 @@ def validate(self, sequence): # purposes, turn them into the specific form we store: a list of sets. 
setlist = [] for subseq in sequence: - if isinstance(subseq, six.string_types): + if isinstance(subseq, str): # If this item is a plain string (which Python regards as an # iterable of characters) rather than a list or tuple or set # of strings, treat it as atomic. Make a set containing only # that string. - setlist.append(set([subseq])) + setlist.append({subseq}) else: try: iter(subseq) except TypeError: # subseq is a scalar of some other kind. Make a set # containing only that item. - setlist.append(set([subseq])) + setlist.append({subseq}) else: # subseq is, as we expect, an iterable -- possibly already # a set. Make a set containing its elements. @@ -178,9 +177,9 @@ def validate(self, sequence): # **************************************************************************** def observe(key, results, capture, event): for k, v in results: - capture.add("{0} got {1}".format(key, k)) + capture.add("{} got {}".format(key, k)) result = event.wait() - capture.add("{0} returning {1}".format(key, result)) + capture.add("{} returning {}".format(key, result)) return result @@ -214,7 +213,7 @@ def test_wait_each_empty(): with check_no_suspend(): for k, v in pool.wait_each(()): # shouldn't yield anything - raise AssertionError("empty wait_each() returned ({0}, {1})".format(k, v)) + raise AssertionError("empty wait_each() returned ({}, {})".format(k, v)) def test_wait_each_preload(): @@ -248,7 +247,7 @@ def test_wait_each_posted(): eventlet.spawn(post_each, pool, capture) # use a string as a convenient iterable of single-letter keys for k, v in pool.wait_each("bcdefg"): - capture.add("got ({0}, {1})".format(k, v)) + capture.add("got ({}, {})".format(k, v)) capture.validate([ ["got (b, 2)", "got (c, 3)"], @@ -400,7 +399,7 @@ def spawn_many_func(key, results, capture, pool): # with a capture.step() at each post(), too complicated to predict # which results will be delivered when pass - capture.add("{0} done".format(key)) + capture.add("{} done".format(key)) # use post(key) 
instead of waiting for implicit post() of return value pool.post(key, key) capture.step() @@ -443,13 +442,13 @@ def test_spawn_many(): # With the dependency graph shown above, it is not guaranteed whether b or # c will complete first. Handle either case. sequence = capture.sequence[:] - sequence[1:3] = [set([sequence[1].pop(), sequence[2].pop()])] + sequence[1:3] = [{sequence[1].pop(), sequence[2].pop()}] assert_equal(sequence, - [set(["a done"]), - set(["b done", "c done"]), - set(["d done"]), - set(["e done"]), - set(["waitall() done"]), + [{"a done"}, + {"b done", "c done"}, + {"d done"}, + {"e done"}, + {"waitall() done"}, ]) @@ -463,9 +462,9 @@ def test_wait_each_all(): capture = Capture() pool = DAGPool([("a", "a")]) # capture a different Event for each key - events = dict((key, eventlet.event.Event()) for key in six.iterkeys(deps)) + events = {key: eventlet.event.Event() for key in deps.keys()} # can't use spawn_many() because we need a different event for each - for key, dep in six.iteritems(deps): + for key, dep in deps.items(): pool.spawn(key, dep, observe, capture, events[key]) keys = "abcde" # this specific order each = iter(pool.wait_each()) @@ -478,11 +477,11 @@ def test_wait_each_all(): # everything from keys[:pos+1] should have a value by now for k in keys[:pos + 1]: assert pool.get(k, _notthere) is not _notthere, \ - "greenlet {0} did not yet produce a value".format(k) + "greenlet {} did not yet produce a value".format(k) # everything from keys[pos+1:] should not yet for k in keys[pos + 1:]: assert pool.get(k, _notthere) is _notthere, \ - "wait_each() delayed value for {0}".format(keys[pos]) + "wait_each() delayed value for {}".format(keys[pos]) # let next greenthread complete if pos < len(keys) - 1: k = keys[pos + 1] @@ -567,7 +566,7 @@ def test_post_replace(): def waitfor(capture, pool, key): value = pool[key] - capture.add("got {0}".format(value)) + capture.add("got {}".format(value)) def test_getitem(): @@ -606,7 +605,7 @@ def 
test_waitall_exc(): except PropagateError as err: assert_equal(err.key, "a") assert isinstance(err.exc, BogusError), \ - "exc attribute is {0}, not BogusError".format(err.exc) + "exc attribute is {}, not BogusError".format(err.exc) assert_equal(str(err.exc), "bogus") msg = str(err) assert_in("PropagateError(a)", msg) @@ -628,7 +627,7 @@ def test_propagate_exc(): erra = errb.exc assert_equal(erra.key, "a") assert isinstance(erra.exc, BogusError), \ - "exc attribute is {0}, not BogusError".format(erra.exc) + "exc attribute is {}, not BogusError".format(erra.exc) assert_equal(str(erra.exc), "bogus") msg = str(errc) assert_in("PropagateError(a)", msg) diff --git a/tests/db_pool_test.py b/tests/db_pool_test.py index f5bc7729e3..e1d389e96e 100644 --- a/tests/db_pool_test.py +++ b/tests/db_pool_test.py @@ -1,10 +1,8 @@ -from __future__ import print_function import os import sys import traceback from eventlet import db_pool -import six import eventlet import eventlet.tpool import tests @@ -24,7 +22,7 @@ pass -class DBTester(object): +class DBTester: __test__ = False # so that nose doesn't try to execute this directly def setUp(self): @@ -63,7 +61,7 @@ def set_up_dummy_table(self, connection=None): # silly mock class -class Mock(object): +class Mock: pass @@ -71,7 +69,7 @@ class DBConnectionPool(DBTester): __test__ = False # so that nose doesn't try to execute this directly def setUp(self): - super(DBConnectionPool, self).setUp() + super().setUp() self.pool = self.create_pool() self.connection = self.pool.get() @@ -79,7 +77,7 @@ def tearDown(self): if self.connection: self.pool.put(self.connection) self.pool.clear() - super(DBConnectionPool, self).tearDown() + super().tearDown() def assert_cursor_works(self, cursor): cursor.execute("select 1") @@ -139,7 +137,7 @@ def test_bool(self): def fill_up_table(self, conn): curs = conn.cursor() - for i in six.moves.range(1000): + for i in range(1000): curs.execute('insert into test_table (value_int) values (%s)' % i) conn.commit() @@ 
-228,8 +226,8 @@ def test_clear_warmup(self): self.assertEqual(len(self.pool.free_items), 0) def test_unwrap_connection(self): - self.assert_(isinstance(self.connection, - db_pool.GenericConnectionWrapper)) + self.assertTrue(isinstance(self.connection, + db_pool.GenericConnectionWrapper)) conn = self.pool._unwrap_connection(self.connection) assert not isinstance(conn, db_pool.GenericConnectionWrapper) @@ -318,17 +316,17 @@ def test_raising_create(self): self.assertEqual(self.pool.free(), 1) -class DummyConnection(object): +class DummyConnection: def rollback(self): pass -class DummyDBModule(object): +class DummyDBModule: def connect(self, *args, **kwargs): return DummyConnection() -class RaisingDBModule(object): +class RaisingDBModule: def connect(self, *args, **kw): raise RuntimeError() @@ -348,10 +346,10 @@ def create_pool(self, min_size=0, max_size=1, max_idle=10, max_age=10, **self._auth) def setUp(self): - super(TpoolConnectionPool, self).setUp() + super().setUp() def tearDown(self): - super(TpoolConnectionPool, self).tearDown() + super().tearDown() eventlet.tpool.killall() @@ -453,7 +451,7 @@ def mysql_requirement(_f): return False -class MysqlConnectionPool(object): +class MysqlConnectionPool: dummy_table_sql = """CREATE TEMPORARY TABLE test_table ( row_id INTEGER PRIMARY KEY AUTO_INCREMENT, @@ -471,10 +469,10 @@ class MysqlConnectionPool(object): def setUp(self): self._dbmodule = MySQLdb self._auth = tests.get_database_auth()['MySQLdb'] - super(MysqlConnectionPool, self).setUp() + super().setUp() def tearDown(self): - super(MysqlConnectionPool, self).tearDown() + super().tearDown() def create_db(self): auth = self._auth.copy() @@ -518,7 +516,7 @@ def postgres_requirement(_f): return False -class Psycopg2ConnectionPool(object): +class Psycopg2ConnectionPool: dummy_table_sql = """CREATE TEMPORARY TABLE test_table ( row_id SERIAL PRIMARY KEY, @@ -535,10 +533,10 @@ class Psycopg2ConnectionPool(object): def setUp(self): self._dbmodule = psycopg2 self._auth = 
tests.get_database_auth()['psycopg2'] - super(Psycopg2ConnectionPool, self).setUp() + super().setUp() def tearDown(self): - super(Psycopg2ConnectionPool, self).tearDown() + super().tearDown() def create_db(self): dbname = 'test%s' % os.getpid() diff --git a/tests/debug_test.py b/tests/debug_test.py index 82b3a83457..1b860df8c9 100644 --- a/tests/debug_test.py +++ b/tests/debug_test.py @@ -1,7 +1,7 @@ +import io import sys from eventlet import debug -import six import tests import eventlet @@ -29,12 +29,9 @@ def test_unspew(self): assert self.tracer is None def test_line(self): - if sys.version_info >= (3, 7): - frame_str = "f== (3, 7): - frame_str = "f=' % ('type' if six.PY2 else 'class')) +expected_get_fileno_type_error_message = 'Expected int or long, got ' def test_get_fileno_of_wrong_type_fails(): @@ -699,7 +698,7 @@ def test_get_fileno_of_wrong_type_fails(): def test_get_fileno_of_a_socket_with_fileno_returning_wrong_type_fails(): - class DummySocket(object): + class DummySocket: def fileno(self): return 'foo' try: @@ -726,7 +725,7 @@ def test_pipe(self): wf = greenio.GreenPipe(w, 'wb', 0) def sender(f, content): - for ch in map(six.int2byte, six.iterbytes(content)): + for ch in map(struct.Struct(">B").pack, iter(content)): eventlet.sleep(0.0001) f.write(ch) f.close() @@ -805,7 +804,7 @@ def test_pipe_writes_large_messages(self): r = greenio.GreenPipe(r, 'rb') w = greenio.GreenPipe(w, 'wb') - large_message = b"".join([1024 * six.int2byte(i) for i in range(65)]) + large_message = b"".join([1024 * bytes((i,)) for i in range(65)]) def writer(): w.write(large_message) @@ -815,7 +814,7 @@ def writer(): for i in range(65): buf = r.read(1024) - expected = 1024 * six.int2byte(i) + expected = 1024 * bytes((i,)) self.assertEqual( buf, expected, "expected=%r..%r, found=%r..%r iter=%d" diff --git a/tests/greenpool_test.py b/tests/greenpool_test.py index 6a5bc69919..6ff9aa0c98 100644 --- a/tests/greenpool_test.py +++ b/tests/greenpool_test.py @@ -4,7 +4,6 @@ import 
eventlet from eventlet import hubs, pools from eventlet.support import greenlets as greenlet -import six import tests @@ -138,7 +137,7 @@ def wait_long_time(e): timer = eventlet.Timeout(1) try: evt = eventlet.Event() - for x in six.moves.range(num_free): + for x in range(num_free): pool.spawn(wait_long_time, evt) # if the pool has fewer free than we expect, # then we'll hit the timeout error @@ -254,9 +253,9 @@ def foo(a): self.assertEqual(r, [1]) p.spawn_n(foo, 4) - self.assertEqual(set(r), set([1, 2, 3])) + self.assertEqual(set(r), {1, 2, 3}) eventlet.sleep(0) - self.assertEqual(set(r), set([1, 2, 3, 4])) + self.assertEqual(set(r), {1, 2, 3, 4}) def test_exceptions(self): p = eventlet.GreenPool(2) @@ -307,7 +306,7 @@ def raiser(item): results = [] while True: try: - results.append(six.next(it)) + results.append(next(it)) except RuntimeError: results.append('r') except StopIteration: @@ -414,7 +413,7 @@ def spawn_order_check(self, concurrency): p = eventlet.GreenPile(concurrency) def makework(count, unique): - for i in six.moves.range(count): + for i in range(count): token = (unique, i) p.spawn(pressure, token) @@ -428,7 +427,7 @@ def makework(count, unique): it = iter(p) while True: try: - i = six.next(it) + i = next(it) except StressException as exc: i = exc.args[0] except StopIteration: @@ -453,11 +452,11 @@ def imap_memory_check(self, concurrency): # ordered and consumes a constant amount of memory p = eventlet.GreenPool(concurrency) count = 1000 - it = p.imap(passthru, six.moves.range(count)) + it = p.imap(passthru, range(count)) latest = -1 while True: try: - i = six.next(it) + i = next(it) except StopIteration: break @@ -496,7 +495,7 @@ def run(int_pool): int_pool = IntPool(max_size=intpool_size) pool = eventlet.GreenPool(pool_size) - for ix in six.moves.range(num_executes): + for ix in range(num_executes): pool.spawn(run, int_pool) pool.waitall() diff --git a/tests/greenthread_test.py b/tests/greenthread_test.py index 39b29c79de..5025061d99 100644 --- 
a/tests/greenthread_test.py +++ b/tests/greenthread_test.py @@ -15,7 +15,7 @@ def waiter(a): return a -class Asserts(object): +class Asserts: def assert_dead(self, gt): if hasattr(gt, 'wait'): self.assertRaises(greenlet.GreenletExit, gt.wait) @@ -26,7 +26,7 @@ def assert_dead(self, gt): class Spawn(LimitedTestCase, Asserts): def tearDown(self): global _g_results - super(Spawn, self).tearDown() + super().tearDown() _g_results = [] def test_simple(self): @@ -133,7 +133,7 @@ def test_kill_already_started(self): class SpawnAfterLocal(LimitedTestCase, Asserts): def setUp(self): - super(SpawnAfterLocal, self).setUp() + super().setUp() self.lst = [1] def test_timer_fired(self): diff --git a/tests/hub_test.py b/tests/hub_test.py index e1e9b84a43..284f862506 100644 --- a/tests/hub_test.py +++ b/tests/hub_test.py @@ -9,7 +9,6 @@ import eventlet from eventlet import debug, hubs from eventlet.support import greenlets -import six DELAY = 0.001 @@ -26,7 +25,7 @@ def test_cancel_immediate(self): hub = hubs.get_hub() stimers = hub.get_timers_count() scanceled = hub.timers_canceled - for i in six.moves.range(2000): + for i in range(2000): t = hubs.get_hub().schedule_call_global(60, noop) t.cancel() self.assert_less_than_equal(hub.timers_canceled, @@ -39,7 +38,7 @@ def test_cancel_accumulated(self): hub = hubs.get_hub() stimers = hub.get_timers_count() scanceled = hub.timers_canceled - for i in six.moves.range(2000): + for i in range(2000): t = hubs.get_hub().schedule_call_global(60, noop) eventlet.sleep() self.assert_less_than_equal(hub.timers_canceled, @@ -58,7 +57,7 @@ def test_cancel_proportion(self): uncanceled_timers = [] stimers = hub.get_timers_count() scanceled = hub.timers_canceled - for i in six.moves.range(1000): + for i in range(1000): # 2/3rds of new timers are uncanceled t = hubs.get_hub().schedule_call_global(60, noop) t2 = hubs.get_hub().schedule_call_global(60, noop) @@ -84,12 +83,12 @@ def test_cancel_proportion(self): class 
TestMultipleListenersCleanup(tests.LimitedTestCase): def setUp(self): - super(TestMultipleListenersCleanup, self).setUp() + super().setUp() debug.hub_prevent_multiple_readers(False) debug.hub_exceptions(False) def tearDown(self): - super(TestMultipleListenersCleanup, self).tearDown() + super().tearDown() debug.hub_prevent_multiple_readers(True) debug.hub_exceptions(True) diff --git a/tests/isolated/green_ssl_py36_properties.py b/tests/isolated/green_ssl_py36_properties.py index aa6b5b5a34..989444a98c 100644 --- a/tests/isolated/green_ssl_py36_properties.py +++ b/tests/isolated/green_ssl_py36_properties.py @@ -11,6 +11,6 @@ certfile='does-not-exist', keyfile='does-not-exist', server_side=True) - except IOError as ex: + except OSError as ex: assert ex.errno == 2 print('pass') diff --git a/tests/isolated/hub_fork.py b/tests/isolated/hub_fork.py index 1872942223..72e58febe8 100644 --- a/tests/isolated/hub_fork.py +++ b/tests/isolated/hub_fork.py @@ -17,7 +17,7 @@ elif pid == 0: with eventlet.Timeout(1): sock, _ = server.accept() - sock.sendall('ok {0}'.format(os.getpid()).encode()) + sock.sendall('ok {}'.format(os.getpid()).encode()) sock.close() sys.exit(0) elif pid > 0: diff --git a/tests/isolated/hub_kqueue_unsupported.py b/tests/isolated/hub_kqueue_unsupported.py index 373df98b9f..47b6f75cbf 100644 --- a/tests/isolated/hub_kqueue_unsupported.py +++ b/tests/isolated/hub_kqueue_unsupported.py @@ -1,4 +1,3 @@ -from __future__ import print_function __test__ = False diff --git a/tests/isolated/hub_use_hub_class.py b/tests/isolated/hub_use_hub_class.py index c7b37828d9..d0dd6e9500 100644 --- a/tests/isolated/hub_use_hub_class.py +++ b/tests/isolated/hub_use_hub_class.py @@ -1,8 +1,7 @@ -from __future__ import print_function __test__ = False -class Foo(object): +class Foo: pass diff --git a/tests/isolated/mysqldb_monkey_patch.py b/tests/isolated/mysqldb_monkey_patch.py index 01041fe56a..a0bb6add58 100644 --- a/tests/isolated/mysqldb_monkey_patch.py +++ 
b/tests/isolated/mysqldb_monkey_patch.py @@ -5,7 +5,7 @@ from eventlet import patcher from eventlet.green import MySQLdb as gm patcher.monkey_patch(all=True, MySQLdb=True) - patched_set = set(patcher.already_patched) - set(['psycopg']) + patched_set = set(patcher.already_patched) - {'psycopg'} assert patched_set == frozenset([ 'MySQLdb', 'os', diff --git a/tests/isolated/patcher_blocking_select_methods_are_deleted.py b/tests/isolated/patcher_blocking_select_methods_are_deleted.py index a761e1e364..051cc0f3ba 100644 --- a/tests/isolated/patcher_blocking_select_methods_are_deleted.py +++ b/tests/isolated/patcher_blocking_select_methods_are_deleted.py @@ -16,17 +16,16 @@ import sys - if sys.version_info >= (3, 4): - import selectors - for name in [ - 'PollSelector', - 'EpollSelector', - 'DevpollSelector', - 'KqueueSelector', - ]: - assert not hasattr(selectors, name), name + import selectors + for name in [ + 'PollSelector', + 'EpollSelector', + 'DevpollSelector', + 'KqueueSelector', + ]: + assert not hasattr(selectors, name), name - default = selectors.DefaultSelector - assert default is selectors.SelectSelector, default + default = selectors.DefaultSelector + assert default is selectors.SelectSelector, default print('pass') diff --git a/tests/isolated/patcher_builtin.py b/tests/isolated/patcher_builtin.py index 3de73bf6e1..56b2dbe210 100644 --- a/tests/isolated/patcher_builtin.py +++ b/tests/isolated/patcher_builtin.py @@ -6,7 +6,7 @@ from eventlet import hubs with patch.object(hubs, 'notify_opened') as mock_func: eventlet.monkey_patch(builtins=True) - with open(__file__, 'r') as f: + with open(__file__) as f: mock_func.assert_called_with(f.fileno()) if sys.version_info.major == 2: with file(__file__, 'r') as f: diff --git a/tests/isolated/patcher_fork_after_monkey_patch.py b/tests/isolated/patcher_fork_after_monkey_patch.py index 6c5c44565c..c8ee37683c 100644 --- a/tests/isolated/patcher_fork_after_monkey_patch.py +++ 
b/tests/isolated/patcher_fork_after_monkey_patch.py @@ -38,9 +38,8 @@ def target(): if os.fork() == 0: # Inside the child, we should only have a main thread, # but old pythons make it difficult to ensure - if sys.version_info >= (3, 7): - check(1, threading, 'child post-fork patched') - check(1, _threading, 'child post-fork original') + check(1, threading, 'child post-fork patched') + check(1, _threading, 'child post-fork original') check(1, eventlet.green.threading, 'child post-fork green') sys.exit() else: diff --git a/tests/isolated/patcher_import_patched_defaults.py b/tests/isolated/patcher_import_patched_defaults.py index 69ea1b5ee5..99351dcb55 100644 --- a/tests/isolated/patcher_import_patched_defaults.py +++ b/tests/isolated/patcher_import_patched_defaults.py @@ -12,7 +12,7 @@ t = target.socket.socket import eventlet.green.socket as g if not issubclass(t, g.socket): - print('Fail. Target socket not green: {0} bases {1}'.format(t, t.__bases__)) + print('Fail. Target socket not green: {} bases {}'.format(t, t.__bases__)) sys.exit(1) print('pass') diff --git a/tests/isolated/patcher_open_kwargs.py b/tests/isolated/patcher_open_kwargs.py index be88ef9cb9..22daca6bfd 100644 --- a/tests/isolated/patcher_open_kwargs.py +++ b/tests/isolated/patcher_open_kwargs.py @@ -4,7 +4,7 @@ import eventlet eventlet.monkey_patch(builtins=True, os=True) - with open(__file__, mode="rt", buffering=16): + with open(__file__, buffering=16): pass print("pass") diff --git a/tests/isolated/patcher_socketserver_selectors.py b/tests/isolated/patcher_socketserver_selectors.py index ac4d533b47..cf86c2504b 100644 --- a/tests/isolated/patcher_socketserver_selectors.py +++ b/tests/isolated/patcher_socketserver_selectors.py @@ -4,7 +4,7 @@ import eventlet eventlet.monkey_patch() - from six.moves.BaseHTTPServer import ( + from http.server import ( HTTPServer, BaseHTTPRequestHandler, ) diff --git a/tests/isolated/patcher_threading_current.py b/tests/isolated/patcher_threading_current.py index 
2eb6676cb5..0f595c2502 100644 --- a/tests/isolated/patcher_threading_current.py +++ b/tests/isolated/patcher_threading_current.py @@ -20,6 +20,6 @@ def fun(): for t in ts: t.join() - assert g == set(('t0', 't1', 't2')), repr(g) + assert g == {'t0', 't1', 't2'}, repr(g) print('pass') diff --git a/tests/isolated/patcher_threadpoolexecutor.py b/tests/isolated/patcher_threadpoolexecutor.py index 779165444c..755ed2e79d 100644 --- a/tests/isolated/patcher_threadpoolexecutor.py +++ b/tests/isolated/patcher_threadpoolexecutor.py @@ -8,11 +8,10 @@ import sys # Futures is only included in 3.2 or later - if sys.version_info >= (3, 2): - from concurrent import futures + from concurrent import futures - with futures.ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(pow, 2, 3) - res = future.result() - assert res == 8, '2^3 should be 8, not %s' % res + with futures.ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(pow, 2, 3) + res = future.result() + assert res == 8, '2^3 should be 8, not %s' % res print('pass') diff --git a/tests/isolated/regular_file_readall.py b/tests/isolated/regular_file_readall.py index 4c733febbe..140ad5c705 100644 --- a/tests/isolated/regular_file_readall.py +++ b/tests/isolated/regular_file_readall.py @@ -4,13 +4,12 @@ import eventlet eventlet.monkey_patch() - import six import io import os import tempfile with tempfile.NamedTemporaryFile() as tmp: - with io.open(tmp.name, "wb") as fp: + with open(tmp.name, "wb") as fp: fp.write(b"content") # test BufferedReader.read() @@ -27,16 +26,15 @@ content = fp.read() assert content == b'content' - if six.PY3: - # test FileIO.readall() - fd = os.open(tmp.name, os.O_RDONLY) - fp = os.fdopen(fd, "rb", 0) - with fp: - content = fp.readall() - assert content == b'content' + # test FileIO.readall() + fd = os.open(tmp.name, os.O_RDONLY) + fp = os.fdopen(fd, "rb", 0) + with fp: + content = fp.readall() + assert content == b'content' # test FileIO.readall() (for Python 2 
and Python 3) - with io.open(tmp.name, "rb", 0) as fp: + with open(tmp.name, "rb", 0) as fp: content = fp.readall() assert content == b'content' diff --git a/tests/isolated/socket_resolve_green.py b/tests/isolated/socket_resolve_green.py index 611d7dc9d1..d4d1941ab0 100644 --- a/tests/isolated/socket_resolve_green.py +++ b/tests/isolated/socket_resolve_green.py @@ -11,7 +11,7 @@ n = 10 delay = 0.01 - addr_map = {'test-host{0}.'.format(i): '0.0.1.{0}'.format(i) for i in range(n)} + addr_map = {'test-host{}.'.format(i): '0.0.1.{}'.format(i) for i in range(n)} def slow_udp(q, *a, **kw): qname = q.question[0].name @@ -31,8 +31,8 @@ def slow_udp(q, *a, **kw): def fun(name): try: results[name] = socket.gethostbyname(name) - except socket.error as e: - print('name: {0} error: {1}'.format(name, e)) + except OSError as e: + print('name: {} error: {}'.format(name, e)) pool = eventlet.GreenPool(size=n + 1) @@ -45,7 +45,7 @@ def fun(name): pool.spawn(fun, name) pool.waitall() td = time.time() - t1 - fail_msg = 'Resolve time expected: ~{0:.3f}s, real: {1:.3f}'.format(delay, td) + fail_msg = 'Resolve time expected: ~{:.3f}s, real: {:.3f}'.format(delay, td) assert delay <= td < delay * n, fail_msg assert addr_map == results print('pass') diff --git a/tests/isolated/tpool_exception_leak.py b/tests/isolated/tpool_exception_leak.py index 3bbf61df10..c807194b1f 100644 --- a/tests/isolated/tpool_exception_leak.py +++ b/tests/isolated/tpool_exception_leak.py @@ -9,7 +9,7 @@ class RequiredException(Exception): pass - class A(object): + class A: def ok(self): return 'ok' diff --git a/tests/isolated/wsgi_connection_timeout.py b/tests/isolated/wsgi_connection_timeout.py index 80b524254d..156040c686 100644 --- a/tests/isolated/wsgi_connection_timeout.py +++ b/tests/isolated/wsgi_connection_timeout.py @@ -25,7 +25,6 @@ import socket import eventlet -import six import tests.wsgi_test @@ -38,7 +37,7 @@ output_buffer = [] -class BufferLog(object): +class BufferLog: @staticmethod def write(s): 
output_buffer.append(s.rstrip()) @@ -46,7 +45,7 @@ def write(s): # This test might make you wince -class NaughtySocketAcceptWrap(object): +class NaughtySocketAcceptWrap: # server's socket.accept(); patches resulting connection sockets def __init__(self, sock): @@ -73,7 +72,7 @@ def __call__(self): return conn, addr -class ExplodingConnectionWrap(object): +class ExplodingConnectionWrap: # new connection's socket.makefile # eventlet *tends* to use socket.makefile, not raw socket methods. # need to patch file operations @@ -105,8 +104,7 @@ def __call__(self, mode='r', bufsize=-1): class ExplodingSocketFile(eventlet.greenio._fileobject): def __init__(self, sock, mode='rb', bufsize=-1, close=False): - args = [bufsize, close] if six.PY2 else [] - super(self.__class__, self).__init__(sock, mode, *args) + super(self.__class__, self).__init__(sock, mode) self.armed = False def arm(self): diff --git a/tests/mock.py b/tests/mock.py index 34300217f0..3fcf1f9918 100644 --- a/tests/mock.py +++ b/tests/mock.py @@ -76,16 +76,7 @@ def inner(f): return f return inner else: - if sys.version_info[:2] >= (3, 2): - wraps = original_wraps - else: - def wraps(func): - def inner(f): - f = original_wraps(func)(f) - wrapped = getattr(func, '__wrapped__', func) - f.__wrapped__ = wrapped - return f - return inner + wraps = original_wraps try: unicode @@ -160,7 +151,7 @@ def _is_exception(obj): ) -class _slotted(object): +class _slotted: __slots__ = ['a'] @@ -363,7 +354,7 @@ def _is_magic(name): return '__%s__' % name[2:-2] == name -class _SentinelObject(object): +class _SentinelObject: "A unique, named, sentinel object." 
def __init__(self, name): @@ -373,7 +364,7 @@ def __repr__(self): return 'sentinel.%s' % self.name -class _Sentinel(object): +class _Sentinel: """Access attributes to return a named object, usable as a sentinel.""" def __init__(self): @@ -408,13 +399,11 @@ def _copy(value): if not inPy3k: ClassTypes = (type, ClassType) -_allowed_names = set( - [ +_allowed_names = { 'return_value', '_mock_return_value', 'side_effect', '_mock_side_effect', '_mock_parent', '_mock_new_parent', '_mock_name', '_mock_new_name' - ] -) +} def _delegating_property(name): @@ -482,7 +471,7 @@ def _check_and_set_parent(parent, value, name, new_name): return True -class Base(object): +class Base: _mock_return_value = DEFAULT _mock_side_effect = None @@ -1104,7 +1093,7 @@ def _is_started(patcher): return hasattr(patcher, 'is_local') -class _patch(object): +class _patch: attribute_name = None _active_patches = set() @@ -1554,7 +1543,7 @@ def patch( ) -class _patch_dict(object): +class _patch_dict: """ Patch a dictionary, or dictionary like object, and restore the dictionary to its original state after the test. 
@@ -1712,13 +1701,13 @@ def _patch_stopall(): # (as they are metaclass methods) # __del__ is not supported at all as it causes problems if it exists -_non_defaults = set('__%s__' % method for method in [ +_non_defaults = {'__%s__' % method for method in [ 'cmp', 'getslice', 'setslice', 'coerce', 'subclasses', 'format', 'get', 'set', 'delete', 'reversed', 'missing', 'reduce', 'reduce_ex', 'getinitargs', 'getnewargs', 'getstate', 'setstate', 'getformat', 'setformat', 'repr', 'dir' -]) +]} def _get_method(name, func): @@ -1730,19 +1719,19 @@ def method(self, *args, **kw): return method -_magics = set( +_magics = { '__%s__' % method for method in ' '.join([magic_methods, numerics, inplace, right, extra]).split() -) +} _all_magics = _magics | _non_defaults -_unsupported_magics = set([ +_unsupported_magics = { '__getattr__', '__setattr__', '__init__', '__new__', '__prepare__' '__instancecheck__', '__subclasscheck__', '__del__' -]) +} _calculate_return_value = { '__hash__': lambda self: object.__hash__(self), @@ -1827,7 +1816,7 @@ def _set_return_value(mock, method, name): method.side_effect = side_effector(mock) -class MagicMixin(object): +class MagicMixin: def __init__(self, *args, **kw): _super(MagicMixin, self).__init__(*args, **kw) self._mock_set_magics() @@ -1889,7 +1878,7 @@ def mock_add_spec(self, spec, spec_set=False): self._mock_set_magics() -class MagicProxy(object): +class MagicProxy: def __init__(self, name, parent): self.name = name self.parent = parent @@ -1911,7 +1900,7 @@ def __get__(self, obj, _type=None): return self.create_mock() -class _ANY(object): +class _ANY: "A helper object that compares equal to everything." 
def __eq__(self, other): @@ -2250,7 +2239,7 @@ def _get_class(obj): return type(obj) -class _SpecState(object): +class _SpecState: def __init__(self, spec, spec_set=False, parent=None, name=None, ids=None, instance=False): @@ -2271,7 +2260,7 @@ def __init__(self, spec, spec_set=False, parent=None, type(_ANY.__eq__), ) -FunctionAttributes = set([ +FunctionAttributes = { 'func_closure', 'func_code', 'func_defaults', @@ -2279,7 +2268,7 @@ def __init__(self, spec, spec_set=False, parent=None, 'func_doc', 'func_globals', 'func_name', -]) +} file_spec = None diff --git a/tests/mysqldb_test.py b/tests/mysqldb_test.py index c800752d62..1fc85fa8a0 100644 --- a/tests/mysqldb_test.py +++ b/tests/mysqldb_test.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import os import time import traceback @@ -50,14 +48,14 @@ def setUp(self): self.connection.commit() cursor.close() - super(TestMySQLdb, self).setUp() + super().setUp() def tearDown(self): if self.connection: self.connection.close() self.drop_db() - super(TestMySQLdb, self).tearDown() + super().tearDown() @tests.skip_unless(mysql_requirement) def create_db(self): diff --git a/tests/os_test.py b/tests/os_test.py index 3688d19b9b..f7ea028c6d 100644 --- a/tests/os_test.py +++ b/tests/os_test.py @@ -1,11 +1,9 @@ import eventlet -import six -if six.PY3: - def test_pathlib_open_issue_534(): - pathlib = eventlet.import_patched('pathlib') - path = pathlib.Path(__file__) - with path.open(): - # should not raise - pass +def test_pathlib_open_issue_534(): + pathlib = eventlet.import_patched('pathlib') + path = pathlib.Path(__file__) + with path.open(): + # should not raise + pass diff --git a/tests/parse_results.py b/tests/parse_results.py index 1761327d71..d865d10cf9 100644 --- a/tests/parse_results.py +++ b/tests/parse_results.py @@ -37,7 +37,7 @@ def parse_stdout(s): def parse_unittest_output(s): s = s[s.rindex(unittest_delim) + len(unittest_delim):] - num = int(re.search('^Ran (\d+) test.*?$', s, re.M).group(1)) + num = 
int(re.search(r'^Ran (\d+) test.*?$', s, re.M).group(1)) ok = re.search('^OK$', s, re.M) error, fail, timeout = 0, 0, 0 failed_match = re.search( @@ -51,7 +51,7 @@ def parse_unittest_output(s): error = int(error or '0') else: assert ok_match, repr(s) - timeout_match = re.search('^===disabled because of timeout: (\d+)$', s, re.M) + timeout_match = re.search(r'^===disabled because of timeout: (\d+)$', s, re.M) if timeout_match: timeout = int(timeout_match.group(1)) return num, error, fail, timeout diff --git a/tests/patcher_psycopg_test.py b/tests/patcher_psycopg_test.py index 92c311bb4a..074614941d 100644 --- a/tests/patcher_psycopg_test.py +++ b/tests/patcher_psycopg_test.py @@ -1,7 +1,5 @@ import os -import six - from tests import patcher_test, skip_unless from tests import get_database_auth from tests.db_pool_test import postgres_requirement @@ -47,7 +45,7 @@ def test_psycopg_patched(self): if isinstance(psycopg_auth, str): dsn = psycopg_auth else: - dsn = " ".join(["%s=%s" % (k, v) for k, v in six.iteritems(psycopg_auth)]) + dsn = " ".join(["%s=%s" % (k, v) for k, v in psycopg_auth.items()]) os.environ['PSYCOPG_TEST_DSN'] = dsn self.write_to_tempfile("psycopg_patcher", psycopg_test_file) output, lines = self.launch_subprocess('psycopg_patcher.py') diff --git a/tests/patcher_test.py b/tests/patcher_test.py index 4993889e22..cddb581da4 100644 --- a/tests/patcher_test.py +++ b/tests/patcher_test.py @@ -3,7 +3,6 @@ import sys import tempfile -import six import tests @@ -33,12 +32,12 @@ class ProcessBase(tests.LimitedTestCase): TEST_TIMEOUT = 3 # starting processes is time-consuming def setUp(self): - super(ProcessBase, self).setUp() + super().setUp() self._saved_syspath = sys.path self.tempdir = tempfile.mkdtemp('_patcher_test') def tearDown(self): - super(ProcessBase, self).tearDown() + super().tearDown() sys.path = self._saved_syspath shutil.rmtree(self.tempdir) @@ -52,11 +51,8 @@ def write_to_tempfile(self, name, contents): def launch_subprocess(self, filename): 
path = os.path.join(self.tempdir, filename) output = tests.run_python(path) - if six.PY3: - output = output.decode('utf-8') - separator = '\n' - else: - separator = b'\n' + output = output.decode('utf-8') + separator = '\n' lines = output.split(separator) return output, lines @@ -213,8 +209,7 @@ def test_monkey_patch_threading(): tickcount = [0] def tick(): - import six - for i in six.moves.range(1000): + for i in range(1000): tickcount[0] += 1 eventlet.sleep() diff --git a/tests/pools_test.py b/tests/pools_test.py index 080cc56ed1..28191bcfca 100644 --- a/tests/pools_test.py +++ b/tests/pools_test.py @@ -4,7 +4,6 @@ from eventlet import Queue from eventlet import hubs from eventlet import pools -import six class IntPool(pools.Pool): @@ -117,14 +116,14 @@ def test_putting_to_queue(self): def just_put(pool_item, index): self.pool.put(pool_item) queue.put(index) - for index in six.moves.range(size + 1): + for index in range(size + 1): pool_item = self.pool.get() eventlet.spawn(just_put, pool_item, index) - for _ in six.moves.range(size + 1): + for _ in range(size + 1): x = queue.get() results.append(x) - self.assertEqual(sorted(results), list(six.moves.range(size + 1))) + self.assertEqual(sorted(results), list(range(size + 1))) finally: timer.cancel() @@ -160,7 +159,7 @@ def do_get(): p.put(x) gp = eventlet.GreenPool() - for i in six.moves.range(100): + for i in range(100): gp.spawn_n(do_get) gp.waitall() self.assertEqual(creates[0], 4) diff --git a/tests/socket_test.py b/tests/socket_test.py index 181f9be764..4e639a3888 100644 --- a/tests/socket_test.py +++ b/tests/socket_test.py @@ -12,7 +12,7 @@ def test_create_connection_error(): try: socket.create_connection(('192.0.2.1', 80), timeout=0.1) - except (IOError, OSError): + except OSError: pass @@ -95,7 +95,7 @@ def test_error_is_timeout(): s1.settimeout(0.01) try: s1.recv(1) - except socket.error as e: + except OSError as e: tests.check_is_timeout(e) else: assert False, 'No timeout, socket.error was not raised' 
diff --git a/tests/ssl_test.py b/tests/ssl_test.py index ebaead287e..2dcf37cc6c 100644 --- a/tests/ssl_test.py +++ b/tests/ssl_test.py @@ -11,7 +11,6 @@ from eventlet.green import ssl except ImportError: __test__ = False -import six import tests @@ -36,7 +35,7 @@ def setUp(self): message='.*socket.ssl.*', category=DeprecationWarning) - super(SSLTest, self).setUp() + super().setUp() def test_duplex_response(self): def serve(listener): @@ -249,7 +248,7 @@ def accept_loop(): while True: try: sock, _ = listener.accept() - except socket.error: + except OSError: return eventlet.spawn(serve, sock) @@ -311,8 +310,8 @@ def accept(listener): client_to_server = None try: client_to_server = ssl.wrap_socket(eventlet.connect(listener.getsockname())) - for character in six.iterbytes(content): - character = six.int2byte(character) + for character in iter(content): + character = bytes((character,)) print('We have %d already decrypted bytes pending, expecting: %s' % ( client_to_server.pending(), character)) read_function(client_to_server, character) diff --git a/tests/test__greenness.py b/tests/test__greenness.py index 465fff23ff..53441dd52d 100644 --- a/tests/test__greenness.py +++ b/tests/test__greenness.py @@ -4,13 +4,9 @@ """ import eventlet from eventlet.green import BaseHTTPServer -import six -if six.PY2: - from eventlet.green.urllib2 import HTTPError, urlopen -else: - from eventlet.green.urllib.request import urlopen - from eventlet.green.urllib.error import HTTPError +from eventlet.green.urllib.request import urlopen +from eventlet.green.urllib.error import HTTPError class QuietHandler(BaseHTTPServer.BaseHTTPRequestHandler): @@ -41,7 +37,7 @@ def test_urllib(): try: assert server.request_count == 0 try: - urlopen('http://127.0.0.1:{0}'.format(port)) + urlopen('http://127.0.0.1:{}'.format(port)) assert False, 'should not get there' except HTTPError as ex: assert ex.code == 501, repr(ex) diff --git a/tests/test__refcount.py b/tests/test__refcount.py index 
5c1c002438..a0a473c8af 100644 --- a/tests/test__refcount.py +++ b/tests/test__refcount.py @@ -65,7 +65,7 @@ def run_and_check(run_client): for x in gc.get_referrers(fd): print(pprint.pformat(x)) for y in gc.get_referrers(x): - print('- {0}'.format(pprint.pformat(y))) + print('- {}'.format(pprint.pformat(y))) raise AssertionError('server should be dead by now') diff --git a/tests/test__socket_errors.py b/tests/test__socket_errors.py index 016bc73480..91a3a050fc 100644 --- a/tests/test__socket_errors.py +++ b/tests/test__socket_errors.py @@ -17,9 +17,9 @@ def test_connection_refused(self): try: s.connect(('127.0.0.1', port)) self.fail("Shouldn't have connected") - except socket.error as ex: + except OSError as ex: code, text = ex.args - assert code == errno.ECONNREFUSED, 'Expected ECONNREFUSED, got {0} ({1})'.format(code, text) + assert code == errno.ECONNREFUSED, 'Expected ECONNREFUSED, got {} ({})'.format(code, text) assert 'refused' in text.lower(), (code, text) def test_timeout_real_socket(self): @@ -58,6 +58,6 @@ def test_create_connection_refused(): try: socket.create_connection(('127.0.0.1', 1)) assert False, "Shouldn't have connected" - except socket.error as ex: + except OSError as ex: code, text = ex.args - assert code == errno.ECONNREFUSED, 'Expected ECONNREFUSED, got {0} ({1})'.format(code, text) + assert code == errno.ECONNREFUSED, 'Expected ECONNREFUSED, got {} ({})'.format(code, text) diff --git a/tests/thread_test.py b/tests/thread_test.py index 76e9b2481d..e5b012e4c0 100644 --- a/tests/thread_test.py +++ b/tests/thread_test.py @@ -7,7 +7,6 @@ from eventlet import greenthread from eventlet import patcher from eventlet.green import thread -import six from tests import LimitedTestCase @@ -19,11 +18,11 @@ def passthru(self, *args, **kw): def setUp(self): self.results = [] - super(Locals, self).setUp() + super().setUp() def tearDown(self): self.results = [] - super(Locals, self).tearDown() + super().tearDown() def test_assignment(self): my_local = 
corolocal.local() @@ -82,7 +81,7 @@ def test_no_leaking(self): refs = weakref.WeakKeyDictionary() my_local = corolocal.local() - class X(object): + class X: pass def do_something(i): @@ -91,7 +90,7 @@ def do_something(i): my_local.foo = o p = eventlet.GreenPool() - for i in six.moves.range(100): + for i in range(100): p.spawn(do_something, i) p.waitall() del p diff --git a/tests/timeout_with_statement_test.py b/tests/timeout_with_statement_test.py index cd452a26e6..e37414dc63 100644 --- a/tests/timeout_with_statement_test.py +++ b/tests/timeout_with_statement_test.py @@ -53,9 +53,9 @@ def test_raising_self_true(self): def test_raising_custom_exception(self): # You can customize the exception raised: try: - with Timeout(DELAY, IOError("Operation takes way too long")): + with Timeout(DELAY, OSError("Operation takes way too long")): sleep(DELAY * 2) - except IOError as ex: + except OSError as ex: assert str(ex) == "Operation takes way too long", repr(ex) def test_raising_exception_class(self): diff --git a/tests/tpool_test.py b/tests/tpool_test.py index 1d8fa6b396..0bc19c10ad 100644 --- a/tests/tpool_test.py +++ b/tests/tpool_test.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function import gc import random @@ -21,7 +20,6 @@ import eventlet from eventlet import tpool -import six import tests @@ -41,11 +39,11 @@ def raise_exception(): class TestTpool(tests.LimitedTestCase): def setUp(self): - super(TestTpool, self).setUp() + super().setUp() def tearDown(self): tpool.killall() - super(TestTpool, self).tearDown() + super().tearDown() def test_wrap_tuple(self): my_tuple = (1, 2) @@ -154,7 +152,7 @@ def foo(): counter = [0] def tick(): - for i in six.moves.range(20000): + for i in range(20000): counter[0] += 1 if counter[0] % 20 == 0: eventlet.sleep(0.0001) @@ -287,7 +285,7 @@ class TpoolLongTests(tests.LimitedTestCase): TEST_TIMEOUT = 60 def test_a_buncha_stuff(self): - class Dummy(object): + class Dummy: def foo(self, when, token=None): assert token is not None time.sleep(random.random() / 200.0) @@ -296,7 +294,7 @@ def foo(self, when, token=None): def sender_loop(loopnum): obj = tpool.Proxy(Dummy()) count = 100 - for n in six.moves.range(count): + for n in range(count): eventlet.sleep(random.random() / 200.0) now = time.time() token = loopnum * count + n @@ -306,7 +304,7 @@ def sender_loop(loopnum): cnt = 10 pile = eventlet.GreenPile(cnt) - for i in six.moves.range(cnt): + for i in range(cnt): pile.spawn(sender_loop, i) results = list(pile) self.assertEqual(len(results), cnt) @@ -323,7 +321,7 @@ def test_leakage_from_tracebacks(self): # some objects will inevitably be created by the previous loop # now we test to ensure that running the loop an order of # magnitude more doesn't generate additional objects - for i in six.moves.range(100): + for i in range(100): self.assertRaises(RuntimeError, tpool.execute, raise_exception) first_created = middle_objs - initial_objs gc.collect() diff --git a/tests/websocket_new_test.py b/tests/websocket_new_test.py index cc857924fe..30ac6012e4 100644 --- a/tests/websocket_new_test.py +++ b/tests/websocket_new_test.py @@ -7,7 +7,6 @@ from eventlet import websocket from 
eventlet.green import httplib from eventlet.green import socket -import six import tests.wsgi_test @@ -26,7 +25,7 @@ def handle(ws): eventlet.sleep(0.01) elif ws.path == '/error': # some random socket error that we shouldn't normally get - raise socket.error(errno.ENOTSOCK) + raise OSError(errno.ENOTSOCK) else: ws.close() @@ -104,16 +103,16 @@ def test_correct_upgrade_request_13(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') result = sock.recv(1024) # The server responds the correct Websocket handshake print('Connection string: %r' % http_connection) - self.assertEqual(result, six.b('\r\n'.join([ + self.assertEqual(result, ('\r\n'.join([ 'HTTP/1.1 101 Switching Protocols', 'Upgrade: websocket', 'Connection: Upgrade', 'Sec-WebSocket-Accept: ywSyWXCPNsDxLrQdQrn5RFNRfBU=\r\n\r\n', - ]))) + ])).encode()) def test_send_recv_13(self): connect = [ @@ -126,15 +125,15 @@ def test_send_recv_13(self): "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!\x01') - ws.send(u'hello world again!') + ws.send('hello world again!') assert ws.wait() == b'hello world!\x01' - assert ws.wait() == u'hello world again!' + assert ws.wait() == 'hello world again!' 
ws.close() eventlet.sleep(0.01) @@ -164,7 +163,7 @@ def error_detector(environ, start_response): "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() @@ -196,7 +195,7 @@ def error_detector(environ, start_response): "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) # get the headers closeframe = struct.pack('!BBIH', 1 << 7 | 8, 1 << 7 | 2, 0, 1000) sock.sendall(closeframe) # "Close the connection" packet. @@ -229,7 +228,7 @@ def error_detector(environ, start_response): "Sec-WebSocket-Key: d9MXuOzlVQ0h+qRllvSCIg==", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) # get the headers sock.sendall(b'\x07\xff') # Weird packet. 
done_with_request.wait() @@ -243,7 +242,7 @@ def set_site(self): self.site = wsapp def setUp(self): - super(TestWebSocketWithCompression, self).setUp() + super().setUp() self.connect = '\r\n'.join([ "GET /echo HTTP/1.1", "Upgrade: websocket", @@ -255,14 +254,14 @@ def setUp(self): "Sec-WebSocket-Extensions: %s", '\r\n' ]) - self.handshake_re = re.compile(six.b('\r\n'.join([ + self.handshake_re = re.compile('\r\n'.join([ 'HTTP/1.1 101 Switching Protocols', 'Upgrade: websocket', 'Connection: Upgrade', 'Sec-WebSocket-Accept: ywSyWXCPNsDxLrQdQrn5RFNRfBU=', 'Sec-WebSocket-Extensions: (.+)' '\r\n', - ]))) + ]).encode()) @staticmethod def get_deflated_reply(ws): @@ -277,7 +276,7 @@ def test_accept_basic_deflate_ext_13(self): ]: sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extension)) + sock.sendall((self.connect % extension).encode()) result = sock.recv(1024) # The server responds the correct Websocket handshake @@ -294,7 +293,7 @@ def test_accept_deflate_ext_context_takeover_13(self): ]: sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extension)) + sock.sendall((self.connect % extension).encode()) result = sock.recv(1024) # The server responds the correct Websocket handshake @@ -315,7 +314,7 @@ def test_accept_deflate_ext_window_max_bits_13(self): ]: sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extension_string)) + sock.sendall((self.connect % extension_string).encode()) result = sock.recv(1024) # The server responds the correct Websocket handshake @@ -341,7 +340,7 @@ def test_reject_max_window_bits_out_of_range_13(self): 'permessage-deflate') sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extension_string)) + sock.sendall((self.connect % extension_string).encode()) result = sock.recv(1024) # The server responds the correct Websocket handshake @@ -356,7 +355,7 @@ def test_server_compress_with_context_takeover_13(self): 
'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) @@ -381,7 +380,7 @@ def test_server_compress_no_context_takeover_13(self): 'server_no_context_takeover': True}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) @@ -441,16 +440,16 @@ def test_compressed_send_recv_13(self): 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') - ws.send(u'hello world again!') + ws.send('hello world again!') assert ws.wait() == b'hello world!' - assert ws.wait() == u'hello world again!' + assert ws.wait() == 'hello world again!' 
ws.close() eventlet.sleep(0.01) @@ -462,7 +461,7 @@ def test_send_uncompressed_msg_13(self): 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) # Send without using deflate, having rsv1 unset @@ -483,16 +482,16 @@ def test_compressed_send_recv_client_no_context_13(self): 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') - ws.send(u'hello world again!') + ws.send('hello world again!') assert ws.wait() == b'hello world!' - assert ws.wait() == u'hello world again!' + assert ws.wait() == 'hello world again!' ws.close() eventlet.sleep(0.01) @@ -504,16 +503,16 @@ def test_compressed_send_recv_server_no_context_13(self): 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') - ws.send(u'hello world again!') + ws.send('hello world again!') assert ws.wait() == b'hello world!' - assert ws.wait() == u'hello world again!' + assert ws.wait() == 'hello world again!' 
ws.close() eventlet.sleep(0.01) @@ -526,16 +525,16 @@ def test_compressed_send_recv_both_no_context_13(self): 'server_no_context_takeover': True}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) ws.send(b'hello') assert ws.wait() == b'hello' ws.send(b'hello world!') - ws.send(u'hello world again!') + ws.send('hello world again!') assert ws.wait() == b'hello world!' - assert ws.wait() == u'hello world again!' + assert ws.wait() == 'hello world again!' ws.close() eventlet.sleep(0.01) @@ -548,7 +547,7 @@ def test_large_frame_size_compressed_13(self): 'server_no_context_takeover': False}} sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect % extensions_string)) + sock.sendall((self.connect % extensions_string).encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True, extensions=extensions) @@ -572,7 +571,7 @@ def test_large_frame_size_compressed_13(self): def test_large_frame_size_uncompressed_13(self): # Test fix for GHSA-9p9m-jm8w-94p2 sock = eventlet.connect(self.server_addr) - sock.sendall(six.b(self.connect)) + sock.sendall(self.connect.encode()) sock.recv(1024) ws = websocket.RFC6455WebSocket(sock, {}, client=True) diff --git a/tests/websocket_test.py b/tests/websocket_test.py index 666d642218..5d55283172 100644 --- a/tests/websocket_test.py +++ b/tests/websocket_test.py @@ -6,7 +6,6 @@ from eventlet import event from eventlet import greenio from eventlet.green import httplib -import six from eventlet.websocket import WebSocket, WebSocketWSGI import tests @@ -28,7 +27,7 @@ def handle(ws): eventlet.sleep(0.01) elif ws.path == '/error': # some random socket error that we shouldn't normally get - raise socket.error(errno.ENOTSOCK) + raise OSError(errno.ENOTSOCK) else: ws.close() @@ -110,16 +109,16 @@ def 
test_correct_upgrade_request_75(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') result = sock.recv(1024) # The server responds the correct Websocket handshake - self.assertEqual(result, six.b('\r\n'.join([ + self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 Web Socket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'WebSocket-Origin: http://%s:%s' % self.server_addr, 'WebSocket-Location: ws://%s:%s/echo\r\n\r\n' % self.server_addr, - ]))) + ]).encode()) def test_correct_upgrade_request_76(self): connect = [ @@ -134,17 +133,17 @@ def test_correct_upgrade_request_76(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') result = sock.recv(1024) # The server responds the correct Websocket handshake - self.assertEqual(result, six.b('\r\n'.join([ + self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ws://%s:%s/echo\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, - ]))) + ]).encode()) def test_query_string(self): # verify that the query string comes out the other side unscathed @@ -160,9 +159,9 @@ def test_query_string(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') result = sock.recv(1024) - self.assertEqual(result, six.b('\r\n'.join([ + self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', @@ -170,7 +169,7 @@ def test_query_string(self): 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ' 
'ws://%s:%s/echo?query_string\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, - ]))) + ]).encode()) def test_empty_query_string(self): # verify that a single trailing ? doesn't get nuked @@ -186,16 +185,16 @@ def test_empty_query_string(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') result = sock.recv(1024) - self.assertEqual(result, six.b('\r\n'.join([ + self.assertEqual(result, '\r\n'.join([ 'HTTP/1.1 101 WebSocket Protocol Handshake', 'Upgrade: WebSocket', 'Connection: Upgrade', 'Sec-WebSocket-Origin: http://%s:%s' % self.server_addr, 'Sec-WebSocket-Protocol: ws', 'Sec-WebSocket-Location: ws://%s:%s/echo?\r\n\r\n8jKS\'y:G*Co,Wxa-' % self.server_addr, - ]))) + ]).encode()) def test_sending_messages_to_websocket_75(self): connect = [ @@ -208,7 +207,7 @@ def test_sending_messages_to_websocket_75(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) sock.sendall(b'\x00hello\xFF') result = sock.recv(1024) @@ -235,7 +234,7 @@ def test_sending_messages_to_websocket_76(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) sock.sendall(b'\x00hello\xFF') result = sock.recv(1024) @@ -260,7 +259,7 @@ def test_getting_messages_from_websocket_75(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') msgs = [result.strip(b'\x00\xff')] @@ -269,7 +268,7 @@ def test_getting_messages_from_websocket_75(self): msgs.append(sock.recv(20).strip(b'\x00\xff')) cnt -= 1 # Last item in msgs is an empty string - 
self.assertEqual(msgs[:-1], [six.b('msg %d' % i) for i in range(10)]) + self.assertEqual(msgs[:-1], [('msg %d' % i).encode() for i in range(10)]) def test_getting_messages_from_websocket_76(self): connect = [ @@ -284,7 +283,7 @@ def test_getting_messages_from_websocket_76(self): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') msgs = [result[16:].strip(b'\x00\xff')] @@ -293,7 +292,7 @@ def test_getting_messages_from_websocket_76(self): msgs.append(sock.recv(20).strip(b'\x00\xff')) cnt -= 1 # Last item in msgs is an empty string - self.assertEqual(msgs[:-1], [six.b('msg %d' % i) for i in range(10)]) + self.assertEqual(msgs[:-1], [('msg %d' % i).encode() for i in range(10)]) def test_breaking_the_connection_75(self): error_detected = [False] @@ -320,7 +319,7 @@ def error_detector(environ, start_response): "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() @@ -353,7 +352,7 @@ def error_detector(environ, start_response): "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) # get the headers sock.close() # close while the app is running done_with_request.wait() @@ -386,7 +385,7 @@ def error_detector(environ, start_response): "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) # get the 
headers sock.sendall(b'\xff\x00') # "Close the connection" packet. done_with_request.wait() @@ -419,7 +418,7 @@ def error_detector(environ, start_response): "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) # get the headers sock.sendall(b'\xef\x00') # Weird packet. done_with_request.wait() @@ -437,7 +436,7 @@ def test_server_closing_connect_76(self): "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') resp = sock.recv(1024) headers, result = resp.split(b'\r\n\r\n') # The remote server should have immediately closed the connection. @@ -468,7 +467,7 @@ def error_detector(environ, start_response): "WebSocket-Protocol: ws", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n') sock.recv(1024) done_with_request.wait() assert error_detected[0] @@ -500,7 +499,7 @@ def error_detector(environ, start_response): "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00", ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) done_with_request.wait() assert error_detected[0] @@ -520,7 +519,7 @@ def test_close_idle(self): 'Sec-WebSocket-Key2: 12998 5 Y3 1 .P00', ) sock = eventlet.connect(self.server_addr) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') sock.recv(1024) sock.sendall(b'\x00hello\xff') result = sock.recv(1024) @@ -533,8 +532,7 @@ def test_wrapped_wsgi(self): site = self.site def wrapper(environ, start_response): - 
for chunk in site(environ, start_response): - yield chunk + yield from site(environ, start_response) self.site = wrapper self.spawn_server() @@ -548,12 +546,12 @@ def wrapper(environ, start_response): ] sock = eventlet.connect(self.server_addr) - sock.sendall(six.b("\r\n".join(connect) + "\r\n\r\n")) + sock.sendall("\r\n".join(connect).encode() + b"\r\n\r\n") resp = sock.recv(1024) headers, result = resp.split(b"\r\n\r\n") msgs = [result.strip(b"\x00\xff")] msgs.extend(sock.recv(20).strip(b"\x00\xff") for _ in range(10)) - expect = [six.b("msg {}".format(i)) for i in range(10)] + [b""] + expect = ["msg {}".format(i).encode() for i in range(10)] + [b""] assert msgs == expect # In case of server error, server will write HTTP 500 response to the socket msg = sock.recv(20) @@ -585,7 +583,7 @@ def test_ssl_sending_messages(self): ] sock = eventlet.wrap_ssl(eventlet.connect(self.server_addr)) - sock.sendall(six.b('\r\n'.join(connect) + '\r\n\r\n^n:ds[4U')) + sock.sendall('\r\n'.join(connect).encode() + b'\r\n\r\n^n:ds[4U') first_resp = b'' while b'\r\n\r\n' not in first_resp: first_resp += sock.recv() @@ -618,7 +616,7 @@ def setUp(self): PATH_INFO='test') self.test_ws = WebSocket(s, env) - super(TestWebSocketObject, self).setUp() + super().setUp() def test_recieve(self): ws = self.test_ws @@ -633,7 +631,7 @@ def test_recieve(self): def test_send_to_ws(self): ws = self.test_ws - ws.send(u'hello') + ws.send('hello') assert ws.socket.sendall.called_with("\x00hello\xFF") ws.send(10) assert ws.socket.sendall.called_with("\x0010\xFF") diff --git a/tests/wsgi_test.py b/tests/wsgi_test.py index df376ae6a1..d04f57293d 100644 --- a/tests/wsgi_test.py +++ b/tests/wsgi_test.py @@ -1,7 +1,7 @@ -# coding: utf-8 import cgi import collections import errno +import io import os import shutil import signal @@ -21,8 +21,7 @@ from eventlet.green import socket as greensocket from eventlet.green import ssl from eventlet.support import bytes_to_str -import six -from six.moves.urllib import 
parse +from urllib import parse import tests @@ -107,7 +106,7 @@ def already_handled(env, start_response): return [] -class Site(object): +class Site: def __init__(self): self.application = hello_world @@ -115,7 +114,7 @@ def __call__(self, env, start_response): return self.application(env, start_response) -class IterableApp(object): +class IterableApp: def __init__(self, send_start_response=False, return_val=()): self.send_start_response = send_start_response self.return_val = return_val @@ -133,8 +132,7 @@ def __call__(self, env, start_response): class IterableSite(Site): def __call__(self, env, start_response): it = self.application(env, start_response) - for i in it: - yield i + yield from it CONTENT_LENGTH = 'content-length' @@ -160,7 +158,7 @@ def send_expect_close(sock, buf): # Since the test expects an early close, this can be ignored. try: sock.sendall(buf) - except socket.error as exc: + except OSError as exc: if support.get_errno(exc) != errno.EPIPE: raise @@ -169,7 +167,7 @@ def read_http(sock): fd = sock.makefile('rb') try: response_line = bytes_to_str(fd.readline().rstrip(b'\r\n')) - except socket.error as exc: + except OSError as exc: # TODO find out whether 54 is ok here or not, I see it when running tests # on Python 3 if support.get_errno(exc) in (10053, 54): @@ -199,7 +197,7 @@ def read_http(sock): # FIXME: Duplicate headers are allowed as per HTTP RFC standard, # the client and/or intermediate proxies are supposed to treat them # as a single header with values concatenated using space (' ') delimiter. 
- assert key_lower not in headers_lower, "header duplicated: {0}".format(key) + assert key_lower not in headers_lower, "header duplicated: {}".format(key) headers_original[key] = value headers_lower[key_lower] = value @@ -221,7 +219,7 @@ def read_http(sock): class _TestBase(tests.LimitedTestCase): def setUp(self): - super(_TestBase, self).setUp() + super().setUp() self.site = Site() self.killer = None self.set_site() @@ -230,7 +228,7 @@ def setUp(self): def tearDown(self): greenthread.kill(self.killer) eventlet.sleep(0) - super(_TestBase, self).tearDown() + super().tearDown() def spawn_server(self, **kwargs): """Spawns a new wsgi server with the given arguments using @@ -238,7 +236,7 @@ def spawn_server(self, **kwargs): Sets `self.server_addr` to (host, port) tuple suitable for `socket.connect`. """ - self.logfile = six.StringIO() + self.logfile = io.StringIO() new_kwargs = dict(max_size=128, log=self.logfile, site=self.site) @@ -323,7 +321,7 @@ def new_app(env, start_response): body = bytes_to_str(env['wsgi.input'].read()) a = parse.parse_qs(body).get('a', [1])[0] start_response('200 OK', [('Content-type', 'text/plain')]) - return [six.b('a is %s, body is %s' % (a, body))] + return [('a is %s, body is %s' % (a, body)).encode()] self.site.application = new_app sock = eventlet.connect(self.server_addr) @@ -487,9 +485,9 @@ def test_014_chunked_post(self): self.site.application = chunked_post sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') - fd.write('PUT /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' - 'Transfer-Encoding: chunked\r\n\r\n' - '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode()) + fd.write(b'PUT /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' + b'Transfer-Encoding: chunked\r\n\r\n' + b'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n') fd.flush() while True: if fd.readline() == b'\r\n': @@ -499,9 +497,9 @@ def test_014_chunked_post(self): sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') - fd.write('PUT /b 
HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' - 'Transfer-Encoding: chunked\r\n\r\n' - '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode()) + fd.write(b'PUT /b HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' + b'Transfer-Encoding: chunked\r\n\r\n' + b'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n') fd.flush() while True: if fd.readline() == b'\r\n': @@ -511,9 +509,9 @@ def test_014_chunked_post(self): sock = eventlet.connect(self.server_addr) fd = sock.makefile('rwb') - fd.write('PUT /c HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' - 'Transfer-Encoding: chunked\r\n\r\n' - '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n'.encode()) + fd.write(b'PUT /c HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n' + b'Transfer-Encoding: chunked\r\n\r\n' + b'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n') fd.flush() while True: if fd.readline() == b'\r\n': @@ -628,8 +626,7 @@ def app(environ, start_response): }.get(environ['PATH_INFO']) if resp_body is None: resp_body = 'Unexpected path: ' + environ['PATH_INFO'] - if six.PY3: - resp_body = resp_body.encode('latin1') + resp_body = resp_body.encode('latin1') # Never look at wsgi.input! 
start_response('200 OK', [('Content-type', 'text/plain')]) return [resp_body] @@ -656,7 +653,7 @@ def app(environ, start_response): try: sock.sendall(b'PUT /3 HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n') - except socket.error as err: + except OSError as err: # At one point this could succeed; presumably some older versions # of python will still allow it, but now we get a BrokenPipeError if err.errno != errno.EPIPE: @@ -704,7 +701,7 @@ def test_020_x_forwarded_for(self): assert '1.2.3.4,5.6.7.8,127.0.0.1' in self.logfile.getvalue() # turning off the option should work too - self.logfile = six.StringIO() + self.logfile = io.StringIO() self.spawn_server(log_x_forwarded_for=False) sock = eventlet.connect(self.server_addr) @@ -734,7 +731,7 @@ def test_socket_remains_open(self): try: server_sock_2.accept() # shouldn't be able to use this one anymore - except socket.error as exc: + except OSError as exc: self.assertEqual(support.get_errno(exc), errno.EBADF) self.spawn_server(sock=server_sock) sock = eventlet.connect(server_sock.getsockname()) @@ -911,7 +908,7 @@ def wsgi_app(environ, start_response): else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') - headers = dict((k, v) for k, v in (h.split(b': ', 1) for h in header_lines[1:])) + headers = {k: v for k, v in (h.split(b': ', 1) for h in header_lines[1:])} assert b'Hundred-Continue-Header-1' in headers assert b'Hundred-Continue-Header-2' in headers assert b'Hundred-Continue-Header-K' in headers @@ -963,8 +960,8 @@ def wsgi_app(environ, start_response): else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') - headers = dict((k, v) for k, v in (h.split(b': ', 1) - for h in header_lines[1:])) + headers = {k: v for k, v in (h.split(b': ', 1) + for h in header_lines[1:])} assert b'Hundred-Continue-Header-1' in headers assert b'Hundred-Continue-Header-2' in headers self.assertEqual(b'H1', 
headers[b'Hundred-Continue-Header-1']) @@ -983,8 +980,8 @@ def wsgi_app(environ, start_response): else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') - headers = dict((k, v) for k, v in (h.split(b': ', 1) - for h in header_lines[1:])) + headers = {k: v for k, v in (h.split(b': ', 1) + for h in header_lines[1:])} assert b'Hundred-Continue-Header-3' in headers self.assertEqual(b'H3', headers[b'Hundred-Continue-Header-3']) @@ -1041,8 +1038,8 @@ def wsgi_app(environ, start_response): else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') - headers = dict((k, v) for k, v in (h.split(b': ', 1) - for h in header_lines[1:])) + headers = {k: v for k, v in (h.split(b': ', 1) + for h in header_lines[1:])} assert b'Hundred-Continue-Header-1' in headers assert b'Hundred-Continue-Header-2' in headers self.assertEqual(b'H1', headers[b'Hundred-Continue-Header-1']) @@ -1061,8 +1058,8 @@ def wsgi_app(environ, start_response): else: header_lines.append(line.strip()) assert header_lines[0].startswith(b'HTTP/1.1 100 Continue') - headers = dict((k, v) for k, v in (h.split(b': ', 1) - for h in header_lines[1:])) + headers = {k: v for k, v in (h.split(b': ', 1) + for h in header_lines[1:])} assert b'Hundred-Continue-Header-3' in headers self.assertEqual(b'H3', headers[b'Hundred-Continue-Header-3']) @@ -1156,7 +1153,7 @@ def test_025_accept_errors(self): try: eventlet.connect(self.server_addr) self.fail("Didn't expect to connect") - except socket.error as exc: + except OSError as exc: self.assertEqual(support.get_errno(exc), errno.ECONNREFUSED) log_content = log.getvalue() @@ -1446,7 +1443,7 @@ def test_aborted_chunked_post(self): def chunk_reader(env, start_response): try: content = env['wsgi.input'].read(1024) - except IOError: + except OSError: blew_up[0] = True content = b'ok' read_content.send(content) @@ -1574,7 +1571,7 @@ def test_unicode_with_only_ascii_characters_works(self): def 
wsgi_app(environ, start_response): start_response("200 OK", []) yield b"oh hai, " - yield u"xxx" + yield "xxx" self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') @@ -1585,7 +1582,7 @@ def test_unicode_with_nonascii_characters_raises_error(self): def wsgi_app(environ, start_response): start_response("200 OK", []) yield b"oh hai, " - yield u"xxx \u0230" + yield "xxx \u0230" self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') @@ -1596,8 +1593,8 @@ def wsgi_app(environ, start_response): def test_path_info_decoding(self): def wsgi_app(environ, start_response): start_response("200 OK", []) - yield six.b("decoded: %s" % environ['PATH_INFO']) - yield six.b("raw: %s" % environ['RAW_PATH_INFO']) + yield ("decoded: %s" % environ['PATH_INFO']).encode() + yield ("raw: %s" % environ['RAW_PATH_INFO']).encode() self.site.application = wsgi_app sock = eventlet.connect(self.server_addr) sock.sendall(b'GET /a*b@%40%233 HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n') @@ -1631,12 +1628,12 @@ def wsgi_app(environ, start_response): # Per PEP-0333 https://www.python.org/dev/peps/pep-0333/#unicode-issues # in all WSGI environment strings application must observe either bytes in latin-1 (ISO-8859-1) # or unicode code points \u0000..\u00ff - msg = 'Expected PATH_INFO to be a native string, not {0}'.format(type(g[0])) + msg = 'Expected PATH_INFO to be a native string, not {}'.format(type(g[0])) assert isinstance(g[0], str), msg # Fortunately, WSGI strings have the same literal representation on both py2 and py3 assert g[0] == '/\xe4\xbd\xa0\xe5\xa5\xbd' - msg = 'Expected PATH_INFO to be a native string, not {0}'.format(type(g[1])) + msg = 'Expected PATH_INFO to be a native string, not {}'.format(type(g[1])) assert isinstance(g[1], str), msg assert g[1] == 
'/\xbd\xa5\xe5\xa0\xbd\xe4' @@ -1644,9 +1641,9 @@ def wsgi_app(environ, start_response): def test_ipv6(self): try: sock = eventlet.listen(('::1', 0), family=socket.AF_INET6) - except (socket.gaierror, socket.error): # probably no ipv6 + except (OSError, socket.gaierror): # probably no ipv6 return - log = six.StringIO() + log = io.StringIO() # first thing the server does is try to log the IP it's bound to def run_server(): @@ -1818,14 +1815,14 @@ def wsgi_app(environ, start_response): def test_log_unix_address(self): def app(environ, start_response): start_response('200 OK', []) - return ['\n{0}={1}\n'.format(k, v).encode() for k, v in environ.items()] + return ['\n{}={}\n'.format(k, v).encode() for k, v in environ.items()] tempdir = tempfile.mkdtemp('eventlet_test_log_unix_address') try: server_sock = eventlet.listen(tempdir + '/socket', socket.AF_UNIX) path = server_sock.getsockname() - log = six.StringIO() + log = io.StringIO() self.spawn_server(site=app, sock=server_sock, log=log) eventlet.sleep(0) # need to enter server loop assert 'http:' + path in log.getvalue() @@ -1847,7 +1844,7 @@ def app(environ, start_response): def test_headers_raw(self): def app(environ, start_response): start_response('200 OK', []) - return [b'\n'.join('{0}: {1}'.format(*kv).encode() for kv in environ['headers_raw'])] + return [b'\n'.join('{}: {}'.format(*kv).encode() for kv in environ['headers_raw'])] self.spawn_server(site=app) sock = eventlet.connect(self.server_addr) @@ -1860,7 +1857,7 @@ def app(environ, start_response): def test_env_headers(self): def app(environ, start_response): start_response('200 OK', []) - return ['{0}: {1}\n'.format(*kv).encode() for kv in sorted(environ.items()) + return ['{}: {}\n'.format(*kv).encode() for kv in sorted(environ.items()) if kv[0].startswith('HTTP_')] self.spawn_server(site=app) @@ -1869,7 +1866,7 @@ def app(environ, start_response): b'x-ANY_k: one\r\nhttp-x-ANY_k: two\r\n\r\n') result = read_http(sock) sock.close() - assert result.status 
== 'HTTP/1.1 200 OK', 'Received status {0!r}'.format(result.status) + assert result.status == 'HTTP/1.1 200 OK', 'Received status {!r}'.format(result.status) assert result.body == (b'HTTP_HOST: localhost\nHTTP_HTTP_X_ANY_K: two\n' b'HTTP_PATH_INFO: foo\nHTTP_X_ANY_K: one\n') @@ -1880,7 +1877,7 @@ def app(environ, start_response): return [line if isinstance(line, bytes) else line.encode('latin1') for kv in sorted(environ.items()) if kv[0].startswith('HTTP_') - for line in ('{0}: {1}\n'.format(*kv),)] + for line in ('{}: {}\n'.format(*kv),)] self.spawn_server(site=app) sock = eventlet.connect(self.server_addr) @@ -1893,7 +1890,7 @@ def app(environ, start_response): b'null-set: \xe2\x88\x85\r\n\r\n') result = read_http(sock) sock.close() - assert result.status == 'HTTP/1.1 200 OK', 'Received status {0!r}'.format(result.status) + assert result.status == 'HTTP/1.1 200 OK', 'Received status {!r}'.format(result.status) assert result.body == ( b'HTTP_HOST: localhost\n' b'HTTP_NULL_SET: \xe2\x88\x85\n' @@ -1920,7 +1917,7 @@ def test_close_idle_connections(self): sock.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') result = read_http(sock) - assert result.status == 'HTTP/1.1 200 OK', 'Received status {0!r}'.format(result.status) + assert result.status == 'HTTP/1.1 200 OK', 'Received status {!r}'.format(result.status) self.killer.kill(KeyboardInterrupt) try: with eventlet.Timeout(1): @@ -1933,7 +1930,7 @@ def read_headers(sock): fd = sock.makefile('rb') try: response_line = fd.readline() - except socket.error as exc: + except OSError as exc: if support.get_errno(exc) == 10053: raise ConnectionClosed raise @@ -1985,11 +1982,11 @@ class ProxiedIterableAlreadyHandledTest(IterableAlreadyHandledTest): # same thing as the previous test but ensuring that it works with tpooled # results as well as regular ones def get_app(self): - return tpool.Proxy(super(ProxiedIterableAlreadyHandledTest, self).get_app()) + return tpool.Proxy(super().get_app()) def tearDown(self): 
tpool.killall() - super(ProxiedIterableAlreadyHandledTest, self).tearDown() + super().tearDown() class TestChunkedInput(_TestBase): @@ -2205,8 +2202,7 @@ def handler(*args): # the hub *before* attempting to read anything from a file descriptor # therefore we need one extra context switch to let it notice closed # socket, die and leave the hub empty - if six.PY3: - eventlet.sleep(0) + eventlet.sleep(0) finally: signal.alarm(0) signal.signal(signal.SIGALRM, signal.SIG_DFL) diff --git a/tests/zmq_test.py b/tests/zmq_test.py index 634b785c01..0ae0cc76f8 100644 --- a/tests/zmq_test.py +++ b/tests/zmq_test.py @@ -24,14 +24,14 @@ class TestUpstreamDownStream(tests.LimitedTestCase): @tests.skip_unless(zmq_supported) def setUp(self): - super(TestUpstreamDownStream, self).setUp() + super().setUp() self.context = zmq.Context() self.sockets = [] @tests.skip_unless(zmq_supported) def tearDown(self): self.clear_up_sockets() - super(TestUpstreamDownStream, self).tearDown() + super().tearDown() def create_bound_pair(self, type1, type2, interface='tcp://127.0.0.1'): """Create a bound socket pair using a random port.""" @@ -250,7 +250,7 @@ def tx(): pub.send(b'test BEGIN') eventlet.sleep(0.005) for i in range(1, 101): - msg = 'test {0}'.format(i).encode() + msg = 'test {}'.format(i).encode() if i != 50: pub.send(msg) else: @@ -584,7 +584,7 @@ def clean_pair(type1, type2, interface='tcp://127.0.0.1'): s1 = ctx.socket(type1) port = s1.bind_to_random_port(interface) s2 = ctx.socket(type2) - s2.connect('{0}:{1}'.format(interface, port)) + s2.connect('{}:{}'.format(interface, port)) eventlet.sleep() yield (s1, s2, port) s1.close() From dbb411d6a8531c52bf9507297e8d844f52d1d8ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radoslaw=20=C5=9Amigielski?= Date: Tue, 2 Jan 2024 16:22:27 +0100 Subject: [PATCH 28/35] Skip test which uses Py cgi module (#865) * Skip test which uses Py cgi module Python cgi module has been deprecated since version 3.11, and will be removed in version 3.13. 
Skip test_019_fieldstorage_compat test if Py >= 3.11. Fixes https://github.com/eventlet/eventlet/issues/863 --- tests/wsgi_test.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/wsgi_test.py b/tests/wsgi_test.py index d04f57293d..86842c5a97 100644 --- a/tests/wsgi_test.py +++ b/tests/wsgi_test.py @@ -1,8 +1,8 @@ -import cgi import collections import errno import io import os +import pytest import shutil import signal import socket @@ -671,7 +671,11 @@ def app(environ, start_response): sock.close() + @pytest.mark.skipif(sys.version_info[:2] >= (3, 11), + reason="cgi deprecated since version 3.11, will be removed in version 3.13") def test_019_fieldstorage_compat(self): + import cgi + def use_fieldstorage(environ, start_response): cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ) start_response('200 OK', [('Content-type', 'text/plain')]) From 12d6e19c7af6693932c80173df1c497abf5772a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radoslaw=20=C5=9Amigielski?= Date: Tue, 2 Jan 2024 23:02:21 +0100 Subject: [PATCH 29/35] Fix low hanging fruits in examples (#867) - problems related to the Py2/Py3 switch - be more clear with error message when zmq module is missing --- examples/chat_bridge.py | 9 +++++++-- examples/connect.py | 2 +- examples/producer_consumer.py | 3 ++- examples/zmq_simple.py | 7 ++++++- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/examples/chat_bridge.py b/examples/chat_bridge.py index e0fd537575..c321e13650 100644 --- a/examples/chat_bridge.py +++ b/examples/chat_bridge.py @@ -1,6 +1,11 @@ import sys -from zmq import FORWARDER, PUB, SUB, SUBSCRIBE -from zmq.devices import Device + +try: + from zmq import FORWARDER, PUB, SUB, SUBSCRIBE + from zmq.devices import Device +except ModuleNotFoundError: + raise SystemExit("Unable to find required zmq module. 
" + "Please install it before running this example.") if __name__ == "__main__": diff --git a/examples/connect.py b/examples/connect.py index e5eeb2e61c..3dd74f3bea 100644 --- a/examples/connect.py +++ b/examples/connect.py @@ -12,7 +12,7 @@ def geturl(url): ip = socket.gethostbyname(url) c.connect((ip, 80)) print('%s connected' % url) - c.sendall('GET /\r\n\r\n') + c.sendall(b'GET /\r\n\r\n') return c.recv(1024) diff --git a/examples/producer_consumer.py b/examples/producer_consumer.py index ca0585dd36..cd069d56ce 100644 --- a/examples/producer_consumer.py +++ b/examples/producer_consumer.py @@ -10,6 +10,7 @@ yourself. """ from eventlet.green.urllib.request import urlopen +from urllib.request import urlopen import eventlet import re @@ -22,7 +23,7 @@ def fetch(url, outq): print("fetching", url) data = '' with eventlet.Timeout(5, False): - data = urllib2.urlopen(url).read().decode() + data = urlopen(url).read().decode() for url_match in url_regex.finditer(data): new_url = url_match.group(0) outq.put(new_url) diff --git a/examples/zmq_simple.py b/examples/zmq_simple.py index 6f5f11e39f..b772f2c542 100644 --- a/examples/zmq_simple.py +++ b/examples/zmq_simple.py @@ -1,6 +1,11 @@ -from eventlet.green import zmq import eventlet +try: + from eventlet.green import zmq +except ModuleNotFoundError: + raise SystemExit("Unable to find required zmq module. " + "Please install it before running this example.") + CTX = zmq.Context(1) From 5f39d559d23b633fa1c0b374f6d895b0333fe317 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radoslaw=20=C5=9Amigielski?= Date: Tue, 9 Jan 2024 17:50:27 +0100 Subject: [PATCH 30/35] Pytests, fix error at teardown of TestGreenSocket.test_full_duplex (#871) Pytests keeps failing with below error: ERROR at teardown of TestGreenSocket.test_full_duplex tests/__init__.py:194: in tearDown and above is caused by: eventlet/hubs/hub.py:310: TestIsTakingTooLong By simply increasing the TEST_TIMEOUT let fully pass all the tests. 
Co-authored-by: Radoslaw Smigielski --- tests/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/__init__.py b/tests/__init__.py index 525fa42839..d0851b954b 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -152,7 +152,7 @@ class LimitedTestCase(unittest.TestCase): timeout is 1 second, change it by setting TEST_TIMEOUT to the desired quantity.""" - TEST_TIMEOUT = 1 + TEST_TIMEOUT = 2 def setUp(self): self.previous_alarm = None From e2c874ee83276c1e26658fc2abcc8760ff7f835b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radoslaw=20=C5=9Amigielski?= Date: Tue, 9 Jan 2024 17:50:53 +0100 Subject: [PATCH 31/35] Fix deprecation warning on ssl.PROTOCOL_TLS (#872) * Fix deprecation warning on ssl.PROTOCOL_TLS ssl.PROTOCOL_SSLv23 has been deprecated since version Python 3.6. Use PROTOCOL_TLS instead. --------- Co-authored-by: Radoslaw Smigielski --- eventlet/green/ssl.py | 6 +++--- tests/ssl_test.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/eventlet/green/ssl.py b/eventlet/green/ssl.py index ebd57f615e..8da153b1f7 100644 --- a/eventlet/green/ssl.py +++ b/eventlet/green/ssl.py @@ -55,7 +55,7 @@ class GreenSSLSocket(_original_sslsocket): """ def __new__(cls, sock=None, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, - ssl_version=PROTOCOL_SSLv23, ca_certs=None, + ssl_version=PROTOCOL_TLS, ca_certs=None, do_handshake_on_connect=True, *args, **kw): if not isinstance(sock, GreenSocket): sock = GreenSocket(sock) @@ -115,7 +115,7 @@ def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs, # do_handshake whose behavior we wish to override def __init__(self, sock, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, - ssl_version=PROTOCOL_SSLv23, ca_certs=None, + ssl_version=PROTOCOL_TLS, ca_certs=None, do_handshake_on_connect=True, *args, **kw): if not isinstance(sock, GreenSocket): sock = GreenSocket(sock) @@ -437,7 +437,7 @@ def sslwrap_simple(sock, keyfile=None, 
certfile=None): ssl_sock = GreenSSLSocket(sock, keyfile=keyfile, certfile=certfile, server_side=False, cert_reqs=CERT_NONE, - ssl_version=PROTOCOL_SSLv23, + ssl_version=PROTOCOL_TLS, ca_certs=None) return ssl_sock diff --git a/tests/ssl_test.py b/tests/ssl_test.py index 2dcf37cc6c..1ce338ca7d 100644 --- a/tests/ssl_test.py +++ b/tests/ssl_test.py @@ -328,7 +328,7 @@ def accept(listener): listener.close() def test_context_wrapped_accept(self): - context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context = ssl.SSLContext(ssl.PROTOCOL_TLS) context.load_cert_chain(tests.certificate_file, tests.private_key_file) expected = "success:{}".format(random.random()).encode() From a94a3cf42fe467064f5dc5a4e6acc054698fade3 Mon Sep 17 00:00:00 2001 From: Tuomo Date: Wed, 10 Jan 2024 17:35:01 +0200 Subject: [PATCH 32/35] greendns: fix getaddrinfo parameter name for Python 3 (#809) * greendns: fix getaddrinfo parameter name for Python 3 The `socktype` parameter has been renamed `type`. Upgrading eventlet accordingly. 
https://github.com/eventlet/eventlet/issues/775 Co-authored-by: Tim Burke --- AUTHORS | 1 + eventlet/support/greendns.py | 4 ++-- tests/greendns_test.py | 5 +++++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/AUTHORS b/AUTHORS index 1190dced46..31b625e5a6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -46,6 +46,7 @@ Contributors * Victor Stinner * Samuel Merritt * Eric Urban +* Tuomo Kriikkula Linden Lab Contributors ----------------------- diff --git a/eventlet/support/greendns.py b/eventlet/support/greendns.py index 0ac0a78e8b..6262142356 100644 --- a/eventlet/support/greendns.py +++ b/eventlet/support/greendns.py @@ -542,7 +542,7 @@ def _getaddrinfo_lookup(host, family, flags): return str(answer.qname), addrs -def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0): +def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Replacement for Python's socket.getaddrinfo This does the A and AAAA lookups asynchronously after which it @@ -565,7 +565,7 @@ def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0): for addr in addrs: try: ai = socket.getaddrinfo(addr, port, family, - socktype, proto, aiflags) + type, proto, aiflags) except OSError as e: if flags & socket.AI_ADDRCONFIG: err = e diff --git a/tests/greendns_test.py b/tests/greendns_test.py index f0f55d050d..8acc2c0ddd 100644 --- a/tests/greendns_test.py +++ b/tests/greendns_test.py @@ -793,6 +793,11 @@ def getaddrinfo(addr, port, family, socktype, proto, aiflags): except OSError as e: assert e.errno == socket.EAI_ADDRFAMILY + def test_getaddrinfo_type_parameter(self): + greendns.resolve = _make_mock_resolve() + greendns.resolve.add('localhost', '127.0.0.1') + greendns.getaddrinfo('localhost', None, type=0) + class TestIsIpAddr(tests.LimitedTestCase): From dec2ed5de1f170fad9539f0638278dbe36a3672f Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Thu, 11 Jan 2024 09:42:46 -0500 Subject: [PATCH 33/35] No more segfaults: a new approach for greening existing locks 
(#866) Switch to a new mechanism for converting pre-existing RLocks to Eventlet-compatible ones at monkey-patching time, hopefully with fewer segfaults. --------- Co-authored-by: Itamar Turner-Trauring --- eventlet/patcher.py | 149 ++++++++++-------- .../patcher_existing_locks_preexisting.py | 41 +++++ tests/patcher_test.py | 4 + 3 files changed, 131 insertions(+), 63 deletions(-) create mode 100644 tests/isolated/patcher_existing_locks_preexisting.py diff --git a/eventlet/patcher.py b/eventlet/patcher.py index bdccb69062..366de161dc 100644 --- a/eventlet/patcher.py +++ b/eventlet/patcher.py @@ -1,3 +1,4 @@ +from __future__ import annotations try: import _imp as imp except ImportError: @@ -382,61 +383,102 @@ def _green_existing_locks(): This was originally noticed in the stdlib logging module.""" import gc + import os import threading import eventlet.green.thread - lock_type = type(threading.Lock()) rlock_type = type(threading.RLock()) - if hasattr(threading, '_PyRLock'): - # this happens on CPython3 and PyPy >= 7.0.0: "py3-style" rlocks, they - # are implemented natively in C and RPython respectively - py3_style = True - pyrlock_type = type(threading._PyRLock()) - else: - # this happens on CPython2.7 and PyPy < 7.0.0: "py2-style" rlocks, - # they are implemented in pure-python - py3_style = False - pyrlock_type = None # We're monkey-patching so there can't be any greenlets yet, ergo our thread # ID is the only valid owner possible. 
tid = eventlet.green.thread.get_ident() - for obj in gc.get_objects(): - if isinstance(obj, rlock_type): - if not py3_style and isinstance(obj._RLock__block, lock_type): - _fix_py2_rlock(obj, tid) - elif py3_style and not isinstance(obj, pyrlock_type): - _fix_py3_rlock(obj, tid) - - if sys.version_info < (3, 10): - # Older py3 won't have RLocks show up in gc.get_objects() -- see - # https://github.com/eventlet/eventlet/issues/546 -- so green a handful - # that we know are significant + + # Now, upgrade all instances: + def upgrade(old_lock): + return _convert_py3_rlock(old_lock, tid) + + _upgrade_instances(sys.modules, rlock_type, upgrade) + + # Report if there are RLocks we couldn't upgrade. For cases where we're + # using coverage.py in parent process, and more generally for tests in + # general, this is difficult to ensure, so just don't complain in that case. + if "PYTEST_CURRENT_TEST" in os.environ: + return + # On older Pythons (< 3.10), gc.get_objects() won't return any RLock + # instances, so this warning won't get logged on older Pythons. However, + # it's a useful warning, so we try to do it anyway for the benefit of those + # users on 3.10 or later. 
+ gc.collect() + remaining_rlocks = len({o for o in gc.get_objects() if isinstance(o, rlock_type)}) + if remaining_rlocks: import logging - if isinstance(logging._lock, rlock_type): - _fix_py3_rlock(logging._lock, tid) - logging._acquireLock() - try: - for ref in logging._handlerList: - handler = ref() - if handler and isinstance(handler.lock, rlock_type): - _fix_py3_rlock(handler.lock, tid) - del handler - finally: - logging._releaseLock() + logger = logging.Logger("eventlet") + logger.error("{} RLock(s) were not greened,".format(remaining_rlocks) + + " to fix this error make sure you run eventlet.monkey_patch() " + + "before importing any other modules.") -def _fix_py2_rlock(rlock, tid): - import eventlet.green.threading - old = rlock._RLock__block - new = eventlet.green.threading.Lock() - rlock._RLock__block = new - if old.locked(): - new.acquire() - rlock._RLock__owner = tid +def _upgrade_instances(container, klass, upgrade, visited=None, old_to_new=None): + """ + Starting with a Python object, find all instances of ``klass``, following + references in ``dict`` values, ``list`` items, and attributes. + Once an object is found, replace all instances with + ``upgrade(found_object)``, again limited to the criteria above. -def _fix_py3_rlock(old, tid): - import gc + In practice this is used only for ``threading.RLock``, so we can assume + instances are hashable. 
+ """ + if visited is None: + visited = {} # map id(obj) to obj + if old_to_new is None: + old_to_new = {} # map old klass instance to upgrade(old) + + # Handle circular references: + visited[id(container)] = container + + def upgrade_or_traverse(obj): + if id(obj) in visited: + return None + if isinstance(obj, klass): + if obj in old_to_new: + return old_to_new[obj] + else: + new = upgrade(obj) + old_to_new[obj] = new + return new + else: + _upgrade_instances(obj, klass, upgrade, visited, old_to_new) + return None + + if isinstance(container, dict): + for k, v in list(container.items()): + new = upgrade_or_traverse(v) + if new is not None: + container[k] = new + if isinstance(container, list): + for i, v in enumerate(container): + new = upgrade_or_traverse(v) + if new is not None: + container[i] = new + try: + container_vars = vars(container) + except TypeError: + pass + else: + for k, v in list(container_vars.items()): + new = upgrade_or_traverse(v) + if new is not None: + setattr(container, k, new) + + +def _convert_py3_rlock(old, tid): + """ + Convert a normal RLock to one implemented in Python. + + This is necessary to make RLocks work with eventlet, but also introduces + bugs, e.g. https://bugs.python.org/issue13697. So more of a downgrade, + really. 
+ """ import threading from eventlet.green.thread import allocate_lock new = threading._PyRLock() @@ -458,26 +500,7 @@ def _fix_py3_rlock(old, tid): acquired = True if acquired: new._owner = tid - gc.collect() - for ref in gc.get_referrers(old): - if isinstance(ref, dict): - for k, v in list(ref.items()): - if v is old: - ref[k] = new - continue - if isinstance(ref, list): - for i, v in enumerate(ref): - if v is old: - ref[i] = new - continue - try: - ref_vars = vars(ref) - except TypeError: - pass - else: - for k, v in ref_vars.items(): - if v is old: - setattr(ref, k, new) + return new def _green_os_modules(): diff --git a/tests/isolated/patcher_existing_locks_preexisting.py b/tests/isolated/patcher_existing_locks_preexisting.py new file mode 100644 index 0000000000..e3306ee9f8 --- /dev/null +++ b/tests/isolated/patcher_existing_locks_preexisting.py @@ -0,0 +1,41 @@ +""" +Ensure pre-existing RLocks get upgraded in a variety of situations. +""" + +import sys +import threading +import unittest.mock +import eventlet + +python_lock = threading._PyRLock + + +class NS: + lock = threading.RLock() + + class NS2: + lock = threading.RLock() + + dict = {1: 2, 12: threading.RLock()} + list = [0, threading.RLock()] + + +def ensure_upgraded(lock): + if not isinstance(lock, python_lock): + raise RuntimeError(lock) + + +if __name__ == '__main__': + # These extra print()s caused either test failures or segfaults until + # https://github.com/eventlet/eventlet/issues/864 was fixed. 
+ if sys.version_info[:2] > (3, 9): + print(unittest.mock.NonCallableMock._lock) + print(NS.lock) + eventlet.monkey_patch() + ensure_upgraded(NS.lock) + ensure_upgraded(NS.NS2.lock) + ensure_upgraded(NS.dict[12]) + ensure_upgraded(NS.list[1]) + if sys.version_info[:2] > (3, 9): + ensure_upgraded(unittest.mock.NonCallableMock._lock) + print("pass") diff --git a/tests/patcher_test.py b/tests/patcher_test.py index cddb581da4..ca7bc99966 100644 --- a/tests/patcher_test.py +++ b/tests/patcher_test.py @@ -523,3 +523,7 @@ def test_builtin(): def test_open_kwargs(): tests.run_isolated("patcher_open_kwargs.py") + + +def test_patcher_existing_locks(): + tests.run_isolated("patcher_existing_locks_preexisting.py") From e4b034f7325131ef059027d05e3c6539c5790cfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Thu, 11 Jan 2024 16:05:24 +0100 Subject: [PATCH 34/35] Fix security issue related to RFC 9112 (#826) By default reject requests which contains headers `content-length` and `transfer-encoding` at the same time. That's not allowed by RFC 9112 and that could lead to potential security attacks. If the `reject_bad_request` option is turned off, then similar requests will be processed even if they are bad formed. That will allow compatibility with old server that can't be updated. https://www.rfc-editor.org/rfc/rfc9112#section-6.1-15 This is an extract of the RFC: > A server MAY reject a request that contains both Content-Length and > Transfer-Encoding or process such a request in accordance with the > Transfer-Encoding alone. Regardless, the server MUST close the > connection after responding to such a request to avoid the potential > attacks. > A server or client that receives an HTTP/1.0 message containing > a Transfer-Encoding header field MUST treat the message as if the > framing is faulty, even if a Content-Length is present, and close the > connection after processing the message. 
The message sender might have > retained a portion of the message, in buffer, that could be > misinterpreted by further use of the connection. The following request would lead to this scenario: ``` POST / HTTP/1.1 Host: a.com Transfer-Encoding: chunked Content-Length: 0 Content-Type: application/x-"##-form-urlencoded 14 id=1'or sleep(1);### 0 ``` With these changes, when this kind of request is received the connection is closed and an error 400 is returned. This scenario can be tested by using the following process: 1. run a wsgi server either by using the wsgi sample in official examples (http://eventlet.net/doc/examples.html#wsgi-server) 2. send the following HTTP request to the running server: ``` curl -d "param1=value1&param2=value2" -X POST -H 'Transfer-Encoding: chunked' -H 'Content-Length: 0' --http1.1 http://0.0.0.0:8090 -i ``` The previous curl command displays the returned headers and status code. You can observe that now, with these changes, bad requests are rejected. These changes also remove `content-length` from the `chunk` tests to avoid reflecting something that's not a good practice. This security issue was originally discovered by Keran Mu (mkr22@mails.tsinghua.edu.cn) and Jianjun Chen (jianjun@tsinghua.edu.cn), from Tsinghua University and Zhongguancun Laboratory. Thanks to them for bringing this security problem to our attention.
Co-authored-by: Itamar Turner-Trauring --- NEWS | 5 +++++ eventlet/wsgi.py | 31 ++++++++++++++++++++++++++++++- tests/wsgi_test.py | 32 ++++++++++++++++++++++++-------- 3 files changed, 59 insertions(+), 9 deletions(-) diff --git a/NEWS b/NEWS index 63ea65637d..994bfd0bcb 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,8 @@ +Unreleased +========== + +* Fix security issue in the wsgi module related to RFC 9112 https://github.com/eventlet/eventlet/pull/826 + 0.34.2 ====== diff --git a/eventlet/wsgi.py b/eventlet/wsgi.py index a9b39b90ca..7e80539ade 100644 --- a/eventlet/wsgi.py +++ b/eventlet/wsgi.py @@ -330,6 +330,22 @@ def readline(self, size=-1): class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): + """This class is used to handle the HTTP requests that arrive + at the server. + + The handler will parse the request and the headers, then call a method + specific to the request type. + + :param conn_state: The given connection status. + :param server: The server accessible by the request handler. + :param reject_bad_requests: Rejection policy. + If True, or not specified, requests defined as non-compliant, + for example with RFC 9112, will automatically be rejected. + Else, if False, even if a request is badly formed, the request + will be processed. Functioning this way defaults to the + more-secure behavior, while allowing work with old clients that + cannot be updated. + """ protocol_version = 'HTTP/1.1' minimum_chunk_size = MINIMUM_CHUNK_SIZE capitalize_response_headers = True @@ -339,11 +355,12 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler): # so before going back to unbuffered, remove any usage of `writelines`.
wbufsize = 16 << 10 - def __init__(self, conn_state, server): + def __init__(self, conn_state, server, reject_bad_requests=True): self.request = conn_state[1] self.client_address = conn_state[0] self.conn_state = conn_state self.server = server + self.reject_bad_requests = reject_bad_requests self.setup() try: self.handle() @@ -443,6 +460,7 @@ def handle_one_request(self): self.rfile = orig_rfile content_length = self.headers.get('content-length') + transfer_encoding = self.headers.get('transfer-encoding') if content_length is not None: try: if int(content_length) < 0: @@ -455,6 +473,17 @@ def handle_one_request(self): self.close_connection = 1 return + if transfer_encoding is not None: + if self.reject_bad_requests: + msg = b"Content-Length and Transfer-Encoding are not allowed together\n" + self.wfile.write( + b"HTTP/1.0 400 Bad Request\r\n" + b"Connection: close\r\n" + b"Content-Length: %d\r\n" + b"\r\n%s" % (len(msg), msg)) + self.close_connection = 1 + return + self.environ = self.get_environ() self.application = self.server.app try: diff --git a/tests/wsgi_test.py b/tests/wsgi_test.py index 86842c5a97..c1691665dc 100644 --- a/tests/wsgi_test.py +++ b/tests/wsgi_test.py @@ -1929,6 +1929,22 @@ def test_close_idle_connections(self): except Exception: assert False, self.logfile.getvalue() + def test_rfc9112_reject_bad_request(self): + # (hberaud): Transfer-Encoding and Content-Length in the + # same request are not allowed by RFC 9112. + # Requests containing both headers MAY be rejected to + # avoid potential attack.
+ self.site.application = use_write + sock = eventlet.connect(self.server_addr) + sock.send( + b'GET / HTTP/1.1\r\n' + b'Transfer-Encoding: chunked\r\n' + b'Content-Length: 0\r\n' + b'Host: localhost\r\n' + b'\r\n') + result = read_http(sock) + self.assertRaises(ConnectionClosed, read_http, sock) + def read_headers(sock): fd = sock.makefile('rb') @@ -2065,7 +2081,7 @@ def ping(self, fd): def test_short_read_with_content_length(self): body = self.body() req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n" \ - "Content-Length:1000\r\n\r\n" + body + "\r\n" + body fd = self.connect() fd.sendall(req.encode()) @@ -2077,7 +2093,7 @@ def test_short_read_with_zero_content_length(self): body = self.body() req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n" \ - "Content-Length:0\r\n\r\n" + body + "\r\n" + body fd = self.connect() fd.sendall(req.encode()) self.assertEqual(read_http(fd).body, b"this is ch") @@ -2109,8 +2125,8 @@ def test_dirt(self): def test_chunked_readline(self): body = self.body() - req = "POST /lines HTTP/1.1\r\nContent-Length: %s\r\n" \ - "transfer-encoding: Chunked\r\n\r\n%s" % (len(body), body) + req = "POST /lines HTTP/1.1\r\n" \ + "transfer-encoding: Chunked\r\n\r\n%s" % (body) fd = self.connect() fd.sendall(req.encode()) @@ -2119,8 +2135,8 @@ def test_chunked_readline_from_input(self): body = self.body() - req = "POST /readline HTTP/1.1\r\nContent-Length: %s\r\n" \ - "transfer-encoding: Chunked\r\n\r\n%s" % (len(body), body) + req = "POST /readline HTTP/1.1\r\n" \ + "transfer-encoding: Chunked\r\n\r\n%s" % (body) fd = self.connect() fd.sendall(req.encode()) @@ -2129,8 +2145,8 @@ def test_chunked_readlines_from_input(self): body = self.body() - req = "POST /readlines HTTP/1.1\r\nContent-Length: %s\r\n" \ - "transfer-encoding: Chunked\r\n\r\n%s" % (len(body), body) + req = "POST /readlines HTTP/1.1\r\n" \
+ "transfer-encoding: Chunked\r\n\r\n%s" % (body) fd = self.connect() fd.sendall(req.encode()) From 268c4d74b695ad2495661c65394e1694d0c0201f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herv=C3=A9=20Beraud?= Date: Thu, 11 Jan 2024 16:31:27 +0100 Subject: [PATCH 35/35] update changelog for version 0.34.3 (#876) --- NEWS | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/NEWS b/NEWS index 994bfd0bcb..9cfe8652d1 100644 --- a/NEWS +++ b/NEWS @@ -1,7 +1,16 @@ Unreleased ========== +0.34.3 +====== + * Fix security issue in the wsgi module related to RFC 9112 https://github.com/eventlet/eventlet/pull/826 +* Fix segfault, a new approach for greening existing locks https://github.com/eventlet/eventlet/pull/866 +* greendns: fix getaddrinfo parameter name https://github.com/eventlet/eventlet/pull/809 +* Fix deprecation warning on ssl.PROTOCOL_TLS https://github.com/eventlet/eventlet/pull/872 +* Pytests, fix error at teardown of TestGreenSocket.test_full_duplex https://github.com/eventlet/eventlet/pull/871 +* Skip test which uses Py cgi module https://github.com/eventlet/eventlet/pull/865 +* Drop old code based on python < 3.7 0.34.2 ======