diff --git a/examples/tests/test_examples.py b/examples/tests/test_examples.py
index 8f248b24..2cffb502 100644
--- a/examples/tests/test_examples.py
+++ b/examples/tests/test_examples.py
@@ -79,7 +79,8 @@ def test_examples_screenshots(
     def unload_module():
         del sys.modules[module_name]

-    request.addfinalizer(unload_module)
+    if request:
+        request.addfinalizer(unload_module)

     if not hasattr(example, "canvas"):
         # some examples we screenshot test don't have a canvas as a global variable when imported,
@@ -188,4 +189,4 @@ def test_examples_run(module, force_offscreen):
     os.environ["RENDERCANVAS_FORCE_OFFSCREEN"] = "true"
     pytest.getoption = lambda x: False
     is_lavapipe = True
-    test_examples_screenshots("validate_volume", pytest, None, None)
+    test_examples_screenshots("cube", pytest, mock_time, None, None)
diff --git a/tests/test_api.py b/tests/test_api.py
index cd572ccf..0c32e6b9 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -100,7 +100,7 @@ def test_enums_and_flags_and_structs():


 def test_base_wgpu_api():
     # Fake a device and an adapter
-    adapter = wgpu.GPUAdapter(None, set(), {}, wgpu.GPUAdapterInfo({}), None)
+    adapter = wgpu.GPUAdapter(None, set(), {}, wgpu.GPUAdapterInfo({}))
     queue = wgpu.GPUQueue("", None, None)
     device = wgpu.GPUDevice("device08", -1, adapter, {42, 43}, {}, queue)
diff --git a/tests/test_async.py b/tests/test_async.py
index f145e3cb..6cada415 100644
--- a/tests/test_async.py
+++ b/tests/test_async.py
@@ -212,7 +212,7 @@ def poller():
 async def test_promise_async_loop_simple():
     loop = SillyLoop()

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)

     loop.process_events()
     result = await promise
@@ -226,7 +226,7 @@ async def test_promise_async_loop_normal():
     def handler(input):
         return input * 2

-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _loop=loop)

     loop.process_events()
     result = await promise
@@ -240,7 +240,7 @@ async def test_promise_async_loop_fail2():
     def handler(input):
         return input / 0

-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _loop=loop)

     loop.process_events()
     with raises(ZeroDivisionError):
@@ -272,7 +272,7 @@ def callback(r):
         nonlocal result
         result = r

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)
     promise.then(callback)

     loop.process_events()
@@ -291,7 +291,7 @@ def callback(r):
     def handler(input):
         return input * 2

-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _loop=loop)
     promise.then(callback)

     loop.process_events()
@@ -315,7 +315,7 @@ def err_callback(e):
     def handler(input):
         return input / 0

-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _loop=loop)
     promise.then(callback, err_callback)

     loop.process_events()
@@ -338,7 +338,7 @@ def callback1(r):
         nonlocal result
         result = r

-    promise = MyPromise("test", None, loop=loop)
+    promise = MyPromise("test", None, _loop=loop)
     p = promise.then(callback1)

     loop.process_events()
@@ -371,7 +371,7 @@ def callback3(r):
         nonlocal result
         result = r

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)
     p = promise.then(callback1).then(callback2).then(callback3)
     assert isinstance(p, GPUPromise)

@@ -400,7 +400,7 @@ def err_callback(e):
         nonlocal error
         error = e

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)
     p = promise.then(callback1).then(callback2).then(callback3, err_callback)
     assert isinstance(p, GPUPromise)

@@ -430,7 +430,7 @@ def err_callback(e):
         nonlocal error
         error = e

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)
     p = promise.then(callback1).then(callback2).then(callback3, err_callback)
     assert isinstance(p, GPUPromise)

@@ -454,7 +454,7 @@ def callback2(r):
     def callback3(r):
         results.append(r * 3)

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)

     promise.then(callback1)
     promise.then(callback2)
@@ -473,7 +473,7 @@ def test_promise_chaining_after_resolve():
     def callback1(r):
         results.append(r)

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)

     # Adding handler has no result, because promise is not yet resolved.
     promise.then(callback1)
@@ -503,16 +503,16 @@ def test_promise_chaining_with_promises():
     result = None

     def callback1(r):
-        return GPUPromise("test", lambda _: r * 3, loop=loop)
+        return GPUPromise("test", lambda _: r * 3, _loop=loop)

     def callback2(r):
-        return GPUPromise("test", lambda _: r + 2, loop=loop)
+        return GPUPromise("test", lambda _: r + 2, _loop=loop)

     def callback3(r):
         nonlocal result
         result = r

-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _loop=loop)
     p = promise.then(callback1).then(callback2).then(callback3)
     assert isinstance(p, GPUPromise)

@@ -535,7 +535,7 @@ def test_promise_decorator():
     def handler(input):
         return input * 2

-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _loop=loop)

     @promise
     def decorated(r):
diff --git a/wgpu/_async.py b/wgpu/_async.py
index 81e08f29..a87fc851 100644
--- a/wgpu/_async.py
+++ b/wgpu/_async.py
@@ -13,6 +13,43 @@
 logger = logging.getLogger("wgpu")


+class StubLoop:
+    def __init__(self, name, call_soon_threadsafe):
+        self.name = name
+        self.call_soon_threadsafe = call_soon_threadsafe
+
+    def __repr__(self):
+        return f""
+
+
+def get_running_loop():
+    """Get an object with a call_soon_threadsafe() method.
+
+    Sniffio is used for this, and it supports asyncio, trio, and rendercanvas.utils.asyncadapter.
+    If this function returns None, it means that the GPUPromise will not support ``await`` and ``.then()``.
+
+    It's relatively easy to register a custom loop to sniffio so that this code works on it.
+    """
+
+    try:
+        name = sniffio.current_async_library()
+    except sniffio.AsyncLibraryNotFoundError:
+        return None
+
+    if name == "trio":
+        trio = sys.modules[name]
+        token = trio.lowlevel.current_trio_token()
+        return StubLoop("trio", token.run_sync_soon)
+    else:  # asyncio, rendercanvas.utils.asyncadapter, and easy to mimic for custom loops
+        try:
+            mod = sys.modules[name]
+            loop = mod.get_running_loop()
+            loop.call_soon_threadsafe  # noqa: B018 - access to make sure it exists
+            return loop
+        except Exception:
+            return None
+
+
 # The async_sleep and AsyncEvent are a copy of the implementation in rendercanvas.asyncs
@@ -35,16 +72,6 @@ def __new__(cls):
 AwaitedType = TypeVar("AwaitedType")


-class LoopInterface:
-    """A loop object must have (at least) this API.
-
-    Rendercanvas loop objects do, asyncio.loop does too.
- """ - - def call_soon(self, callback: Callable, *args: object): - raise NotImplementedError() - - def get_backoff_time_generator() -> Generator[float, None, None]: """Generates sleep-times, start at 0 then increasing to 100Hz and sticking there.""" for _ in range(5): @@ -88,24 +115,20 @@ def __init__( title: str, handler: Callable | None, *, - loop: LoopInterface | None = None, keepalive: object = None, + _loop: object = None, # for testing and chaining ): """ Arguments: title (str): The title of this promise, mostly for debugging purposes. handler (callable, optional): The function to turn promise input into the result. If None, the result will simply be the input. - loop (LoopInterface, optional): A loop object that at least has a ``call_soon()`` method. - If not given, this promise does not support .then() or promise-chaining. keepalive (object, optional): Pass any data via this arg who's lifetime must be bound to the resolving of this promise. """ self._title = str(title) # title for debugging self._handler = handler # function to turn input into the result - - self._loop = loop # Event loop instance, can be None self._keepalive = keepalive # just to keep something alive self._state = "pending" # "pending", "pending-rejected", "pending-fulfilled", "rejected", "fulfilled" @@ -117,6 +140,9 @@ def __init__( self._error_callbacks = [] self._UNRESOLVED.add(self) + # we only care about call_soon_threadsafe, but clearer to just have a loop object + self._loop = _loop or get_running_loop() + def __repr__(self): return f"" @@ -140,7 +166,9 @@ def _set_input(self, result: object, *, resolve_now=True) -> None: # If the input is a promise, we need to wait for it, i.e. chain to self. if isinstance(result, GPUPromise): if self._loop is None: - self._set_error("Cannot chain GPUPromise if the loop is not set.") + self._set_error( + "Cannot chain GPUPromise because no running loop could be detected." + ) else: result._chain(self) return @@ -197,9 +225,12 @@ def _resolve_callback(self): # Allow tasks that await this promise to continue. if self._async_event is not None: self._async_event.set() - # The callback may already be resolved + # If the value is set, let's resolve it so the handlers get called. But swallow the promise's value/failure. if self._state.startswith("pending-"): - self._resolve() + try: + self._resolve() + except Exception: + pass def _resolve(self): """Finalize the promise, by calling the handler to get the result, and then invoking callbacks.""" @@ -253,7 +284,7 @@ def sync_wait(self) -> AwaitedType: def _sync_wait(self): # Each subclass may implement this in its own way. E.g. it may wait for - # the _thread_event, it may poll the device in a loop while checking the + # the _thread_event, it may poll the device in a while-loop while checking the # status, and Pyodide may use its special logic to sync wait the JS # promise. raise NotImplementedError() @@ -276,7 +307,9 @@ def then( The callback will receive one argument: the result of the promise. """ if self._loop is None: - raise RuntimeError("Cannot use GPUPromise.then() if the loop is not set.") + raise RuntimeError( + "Cannot use GPUPromise.then() because no running loop could be detected." 
+            )
         if not callable(callback):
             raise TypeError(
                 f"GPUPromise.then() got a callback that is not callable: {callback!r}"
             )
@@ -293,7 +326,7 @@ def then(
         title = self._title + " -> " + callback_name

         # Create new promise
-        new_promise = self.__class__(title, callback, loop=self._loop)
+        new_promise = self.__class__(title, callback, _loop=self._loop)
         self._chain(new_promise)

         if error_callback is not None:
@@ -307,7 +340,9 @@ def catch(self, callback: Callable[[Exception], None] | None):
         The callback will receive one argument: the error object.
         """
         if self._loop is None:
-            raise RuntimeError("Cannot use GPUPromise.catch() if the loop is not set.")
+            raise RuntimeError(
+                "Cannot use GPUPromise.catch() because no running loop could be detected."
+            )
         if not callable(callback):
             raise TypeError(
                 f"GPUPromise.catch() got a callback that is not callable: {callback!r}"
             )
@@ -317,7 +352,7 @@ def catch(self, callback: Callable[[Exception], None] | None):
         title = "Catcher for " + self._title

         # Create new promise
-        new_promise = self.__class__(title, callback, loop=self._loop)
+        new_promise = self.__class__(title, callback, _loop=self._loop)

         # Custom chain
         with self._lock:
@@ -329,7 +364,8 @@ def catch(self, callback: Callable[[Exception], None] | None):

     def __await__(self):
         if self._loop is None:
-            # An async busy loop
+            # An async busy loop. In theory we should be able to remove this code, but it helps make the transition
+            # simpler, since then we depend less on https://github.com/pygfx/rendercanvas/pull/151
             async def awaiter():
                 if self._state == "pending":
                     # Do small incremental async naps. Other tasks and threads can run.
diff --git a/wgpu/_classes.py b/wgpu/_classes.py
index e1b41259..83e263a9 100644
--- a/wgpu/_classes.py
+++ b/wgpu/_classes.py
@@ -14,7 +14,7 @@
 import logging
 from typing import Sequence

-from ._async import GPUPromise as BaseGPUPromise, LoopInterface
+from ._async import GPUPromise as BaseGPUPromise
 from ._coreutils import ApiDiff, str_flag_to_int, ArrayLike, CanvasLike
 from ._diagnostics import diagnostics, texture_format_to_bpp
 from . import flags, enums, structs
@@ -119,7 +119,6 @@ def request_adapter_async(
         power_preference: enums.PowerPreferenceEnum | None = None,
         force_fallback_adapter: bool = False,
         canvas: CanvasLike | None = None,
-        loop: LoopInterface | None = None,
     ) -> GPUPromise[GPUAdapter]:
         """Create a `GPUAdapter`, the object that represents an abstract wgpu
         implementation, from which one can request a `GPUDevice`.
@@ -132,8 +131,6 @@ def request_adapter_async(
                 fallback adapter.
             canvas : The canvas or context that the adapter should be able to render to.
                 This can typically be left to None. If given, it must be a ``GPUCanvasContext`` or ``RenderCanvas``.
-            loop : the loop object for async support. Must have at least ``call_soon(f, *args)``.
-                The loop object is required for asynchrouns use with ``promise.then()``. EXPERIMENTAL.
         """
         # If this method gets called, no backend has been loaded yet, let's do that now!
         from .backends.auto import gpu
@@ -145,7 +142,6 @@ def request_adapter_async(
             power_preference=power_preference,
             force_fallback_adapter=force_fallback_adapter,
             canvas=canvas,
-            loop=loop,
         )

     @apidiff.add("Method useful for multi-gpu environments")
@@ -158,9 +154,7 @@ def enumerate_adapters_sync(self) -> list[GPUAdapter]:
         return promise.sync_wait()

     @apidiff.add("Method useful for multi-gpu environments")
-    def enumerate_adapters_async(
-        self, *, loop: LoopInterface | None = None
-    ) -> GPUPromise[list[GPUAdapter]]:
+    def enumerate_adapters_async(self) -> GPUPromise[list[GPUAdapter]]:
         """Get a list of adapter objects available on the current system.

         An adapter can then be selected (e.g. using its summary), and a device
@@ -187,7 +181,7 @@ def enumerate_adapters_async(
         # If this method gets called, no backend has been loaded yet, let's do that now!
         from .backends.auto import gpu

-        return gpu.enumerate_adapters_async(loop=loop)
+        return gpu.enumerate_adapters_async()

     # IDL: GPUTextureFormat getPreferredCanvasFormat();
     @apidiff.change("Disabled because we put it on the canvas context")
@@ -544,10 +538,9 @@ class GPUAdapter:

     _ot = object_tracker

-    def __init__(self, internal, features, limits, adapter_info, loop):
+    def __init__(self, internal, features, limits, adapter_info):
         self._ot.increase(self.__class__.__name__)
         self._internal = internal
-        self._loop = loop

         assert isinstance(features, set)
         assert isinstance(limits, dict)
@@ -693,7 +686,6 @@ def __init__(self, label, internal, adapter, features, limits, queue):
         self._adapter = adapter
         self._features = features
         self._limits = limits
-        self._loop = adapter._loop
         self._queue = queue
         queue._device = self  # because it could not be set earlier

diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py
index 9202f6d0..c17c39ae 100644
--- a/wgpu/backends/wgpu_native/_api.py
+++ b/wgpu/backends/wgpu_native/_api.py
@@ -23,7 +23,6 @@
 from weakref import WeakKeyDictionary
 from typing import NoReturn, Sequence

-from ..._async import LoopInterface
 from ..._coreutils import str_flag_to_int, ArrayLike, CanvasLike
 from ... import classes, flags, enums, structs

@@ -462,7 +461,6 @@ def request_adapter_async(
         self,
         *,
         power_preference: enums.PowerPreferenceEnum | None = None,
         force_fallback_adapter: bool = False,
         canvas: CanvasLike | None = None,
-        loop: LoopInterface | None = None,
     ) -> GPUPromise[GPUAdapter]:
         """Create a `GPUAdapter`, the object that represents an abstract wgpu
         implementation, from which one can request a `GPUDevice`.
@@ -486,11 +484,11 @@
         # We chose the variable name WGPUPY_WGPU_ADAPTER_NAME instead WGPU_ADAPTER_NAME
         # to avoid a clash
         if adapter_name := os.getenv(("WGPUPY_WGPU_ADAPTER_NAME")):
-            adapters = self._enumerate_adapters(loop)
+            adapters = self._enumerate_adapters()
             adapters = [a for a in adapters if adapter_name in a.summary]
             if not adapters:
                 raise ValueError(f"Adapter with name '{adapter_name}' not found.")
-            promise = GPUPromise("adapter by name", None, loop=loop)
+            promise = GPUPromise("adapter by name", None)
             promise._wgpu_set_input(adapters[0])
             return promise

@@ -563,13 +561,10 @@ def request_adapter_callback(status, result, c_message, _userdata1, _userdata2):
             )

         def handler(adapter_id):
-            return self._create_adapter(adapter_id, loop)
+            return self._create_adapter(adapter_id)

         promise = GPUPromise(
-            "request_adapter",
-            handler,
-            loop=loop,
-            keepalive=request_adapter_callback,
+            "request_adapter", handler, keepalive=request_adapter_callback
         )

         instance = get_wgpu_instance()
@@ -587,20 +582,18 @@ def handler(adapter_id):

         return promise

-    def enumerate_adapters_async(
-        self, *, loop: LoopInterface | None = None
-    ) -> GPUPromise[list[GPUAdapter]]:
+    def enumerate_adapters_async(self) -> GPUPromise[list[GPUAdapter]]:
         """Get a list of adapter objects available on the current system.

         This is the implementation based on wgpu-native.
         """
-        result = self._enumerate_adapters(loop)
+        result = self._enumerate_adapters()
         # We already have the result, so we return a resolved promise.
         # The reason this is async is to allow this to work on backends where we cannot actually enumerate adapters.
-        promise = GPUPromise("enumerate_adapters", None, loop=loop)
+        promise = GPUPromise("enumerate_adapters", None)
         promise._wgpu_set_input(result)
         return promise

-    def _enumerate_adapters(self, loop) -> list[GPUAdapter]:
+    def _enumerate_adapters(self) -> list[GPUAdapter]:
         # The first call is to get the number of adapters, and the second call
         # is to get the actual adapters. Note that the second arg (now NULL) can
         # be a `WGPUInstanceEnumerateAdapterOptions` to filter by backend.
@@ -610,9 +603,9 @@ def _enumerate_adapters(self, loop) -> list[GPUAdapter]:
         adapters = new_array("WGPUAdapter[]", count)
         # H: size_t f(WGPUInstance instance, WGPUInstanceEnumerateAdapterOptions const * options, WGPUAdapter * adapters)
         libf.wgpuInstanceEnumerateAdapters(instance, ffi.NULL, adapters)
-        return [self._create_adapter(adapter, loop) for adapter in adapters]
+        return [self._create_adapter(adapter) for adapter in adapters]

-    def _create_adapter(self, adapter_id, loop):
+    def _create_adapter(self, adapter_id):
         # ----- Get adapter info

         # H: nextInChain: WGPUChainedStructOut *, vendor: WGPUStringView, architecture: WGPUStringView, device: WGPUStringView, description: WGPUStringView, backendType: WGPUBackendType, adapterType: WGPUAdapterType, vendorID: int, deviceID: int
@@ -671,7 +664,7 @@ def to_py_str(key):
         features = _get_features(adapter_id, adapter=True)

         # ----- Done
-        return GPUAdapter(adapter_id, features, limits, adapter_info, loop)
+        return GPUAdapter(adapter_id, features, limits, adapter_info)

     def get_canvas_context(self, present_info: dict) -> GPUCanvasContext:
         """Get the GPUCanvasContext object for the appropriate backend.
@@ -1386,10 +1379,7 @@ def handler(device_id):
             return device

         promise = GPUPromise(
-            "request_device",
-            handler,
-            loop=self._loop,
-            keepalive=request_device_callback,
+            "request_device", handler, keepalive=request_device_callback
         )

         # H: WGPUFuture f(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallbackInfo callbackInfo)
@@ -1968,9 +1958,7 @@ def create_compute_pipeline_async(
         # H: WGPUComputePipeline f(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor)
         id = libf.wgpuDeviceCreateComputePipeline(self._internal, descriptor)
         result = GPUComputePipeline(label, id, self)
-        promise = GPUPromise(
-            "create_compute_pipeline_async", None, loop=self._device._loop
-        )
+        promise = GPUPromise("create_compute_pipeline_async", None)
         promise._wgpu_set_input(result)
         return promise

@@ -2002,12 +1990,7 @@ def callback(status, result, c_message, _userdata1, _userdata2):

         def handler(id):
             return GPUComputePipeline(label, id, self)

-        promise = GPUPromise(
-            "create_compute_pipeline",
-            handler,
-            loop=self._loop,
-            keepalive=callback,
-        )
+        promise = GPUPromise("create_compute_pipeline", handler, keepalive=callback)

         token = self._device._poller.get_token()
@@ -2097,9 +2080,7 @@ def create_render_pipeline_async(
         # H: WGPURenderPipeline f(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor)
         id = libf.wgpuDeviceCreateRenderPipeline(self._internal, descriptor)
         result = GPURenderPipeline(label, id, self)
-        promise = GPUPromise(
-            "create_render_pipeline_async", None, loop=self._device._loop
-        )
+        promise = GPUPromise("create_render_pipeline_async", None)
         promise._wgpu_set_input(result)
         return promise

@@ -2129,12 +2110,7 @@ def callback(status, result, c_message, _userdata1, _userdata2):

         def handler(id):
             return GPURenderPipeline(label, id, self)

-        promise = GPUPromise(
-            "create_render_pipeline",
-            handler,
-            loop=self._loop,
-            keepalive=callback,
-        )
+        promise = GPUPromise("create_render_pipeline", handler, keepalive=callback)

         token = self._device._poller.get_token()
@@ -2565,7 +2541,7 @@ def map_async(
         # Can we even map?
         if self._map_state != enums.BufferMapState.unmapped:
-            promise = GPUPromise("buffer.map", None, loop=self._device._loop)
+            promise = GPUPromise("buffer.map", None)
             promise._wgpu_set_error(
                 RuntimeError(
                     f"Can only map a buffer if its currently unmapped, not {self._map_state!r}"
                 )
@@ -2609,12 +2585,7 @@ def handler(_status):
             self._mapped_status = offset, offset + size, mode
             self._mapped_memoryviews = []

-        promise = GPUPromise(
-            "buffer.map",
-            handler,
-            loop=self._device._loop,
-            keepalive=buffer_map_callback,
-        )
+        promise = GPUPromise("buffer.map", handler, keepalive=buffer_map_callback)

         token = self._device._poller.get_token()
@@ -2883,7 +2854,7 @@ def get_compilation_info_async(self) -> GPUPromise[GPUCompilationInfo]:
         result = []

         # Return a resolved promise
-        promise = GPUPromise("get_compilation_info", None, loop=self._device._loop)
+        promise = GPUPromise("get_compilation_info", None)
         promise._wgpu_set_input(result)
         return promise

@@ -4185,10 +4156,7 @@ def handler(_value):
             return None

         promise = GPUPromise(
-            "on_submitted_work_done",
-            handler,
-            loop=self._device._loop,
-            keepalive=work_done_callback,
+            "on_submitted_work_done", handler, keepalive=work_done_callback
         )

         token = self._device._poller.get_token()
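
Note on the new loop detection in wgpu/_async.py: get_running_loop() asks sniffio which async library is currently running and, for anything other than trio, resolves sys.modules[name].get_running_loop() and checks that the result has a call_soon_threadsafe() method. The docstring says it is easy to register a custom loop; the sketch below shows one way that could look. It is an illustration under assumptions, not part of this patch: the module name "my_loop" and the MiniLoop class are invented, while sniffio.thread_local.name is the hook sniffio documents for announcing a running library.

import sys
import types
import queue

import sniffio


class MiniLoop:
    """Bare-bones loop: only what GPUPromise's loop detection requires."""

    def __init__(self):
        self._queue = queue.SimpleQueue()

    def call_soon_threadsafe(self, callback, *args):
        # GPUPromise only ever needs this method to schedule its callbacks.
        self._queue.put((callback, args))

    def run_pending(self):
        # Drive the loop manually: run whatever has been scheduled so far.
        while not self._queue.empty():
            callback, args = self._queue.get()
            callback(*args)


loop = MiniLoop()

# get_running_loop() resolves non-trio libraries via sys.modules[name].get_running_loop(),
# so expose the loop through a (made-up) module object named "my_loop".
mod = types.ModuleType("my_loop")
mod.get_running_loop = lambda: loop
sys.modules["my_loop"] = mod

# Tell sniffio that "my_loop" is the async library active on this thread, so that
# sniffio.current_async_library(), and therefore get_running_loop(), picks it up.
sniffio.thread_local.name = "my_loop"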
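
And a usage sketch of what dropping the loop argument means for callers (a hypothetical script, not part of this patch): inside an asyncio task the running loop is now picked up automatically via sniffio, so the returned GPUPromise can be awaited, or chained with .then(), without any extra wiring.

import asyncio

import wgpu


async def main():
    # No loop argument anymore: the running asyncio loop is detected via sniffio.
    adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance")
    device = await adapter.request_device_async()
    print(adapter.summary)
    print(device.limits)


asyncio.run(main())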