diff --git a/examples/gui_direct2.py b/examples/gui_direct2.py
new file mode 100644
index 00000000..fd08442c
--- /dev/null
+++ b/examples/gui_direct2.py
@@ -0,0 +1,115 @@
+"""
+Direct integration of glfw and wgpu-py without using the RenderCanvas library.
+
+Demonstration for hardcore users that need total low-level control.
+
+This version is a bit more elaborate, using a loop object, and allowing
+async code that uses `promise.then()`. For this to work, wgpu needs
+access to the running loop. In particular, it needs a
+call_soon_threadsafe() function, allowing wgpu to resolve a promise from
+its internal thread.
+"""
+
+# run_example = false
+
+import sys
+import time
+import atexit
+
+import glfw
+import wgpu
+from wgpu.utils.glfw_present_info import get_glfw_present_info
+
+# from triangle import setup_drawing_sync
+from cube import setup_drawing_sync
+
+# Setup glfw
+glfw.init()
+atexit.register(glfw.terminate)
+
+# Disable automatic API selection, we are not using OpenGL
+glfw.window_hint(glfw.CLIENT_API, glfw.NO_API)
+glfw.window_hint(glfw.RESIZABLE, True)
+
+
+title = "wgpu glfw direct"
+window = glfw.create_window(640, 480, title, None, None)
+present_info = get_glfw_present_info(window)
+
+context = wgpu.gpu.get_canvas_context(present_info)
+
+# Initialize physical size once. For robust apps, update this on resize events.
+context.set_physical_size(*glfw.get_framebuffer_size(window))
+
+
+# Setup async callbacks. This is optional, but it enables code using promise.then().
+# The asyncgen hook is a stub for the system to detect the call_soon_threadsafe function.
+# This works if both are defined on the same class or in the same module.
+to_call_soon = []
+call_soon_threadsafe = to_call_soon.append
+stub_asyncgen_hook = lambda agen: None
+sys.set_asyncgen_hooks(stub_asyncgen_hook)
+
+
+class Loop:
+    def __init__(self):
+        self._pending_callbacks = []
+
+    def _asyncgen_hook(self, agen):
+        pass
+
+    def call_soon_threadsafe(self, callback):
+        self._pending_callbacks.append(callback)
+
+    def run(self):
+        # Setup async hook. Our stub hook does not do anything, but it makes sure that
+        # the wgpu promises detect this loop and its call_soon_threadsafe method.
+        sys.set_asyncgen_hooks(self._asyncgen_hook)
+
+        # Setup
+        draw_frame = setup_drawing_sync(context)
+        last_frame_time = time.perf_counter()
+        frame_count = 0
+
+        # render loop
+        while not glfw.window_should_close(window):
+            # process inputs
+            glfw.poll_events()
+
+            # resize handling
+            context.set_physical_size(*glfw.get_framebuffer_size(window))
+
+            # call async callbacks (optional, see above)
+            while self._pending_callbacks:
+                callback = self._pending_callbacks.pop(0)
+                print("Callback:", callback)
+                try:
+                    callback()
+                except Exception as err:
+                    print(err)
+
+            # draw a frame
+            draw_frame()
+            # present the frame to the screen
+            context.present()
+            # stats
+            frame_count += 1
+            etime = time.perf_counter() - last_frame_time
+            if etime > 1:
+                print(f"{frame_count / etime:0.1f} FPS")
+                last_frame_time, frame_count = time.perf_counter(), 0
+
+        # dispose resources
+        sys.set_asyncgen_hooks(None, None)
+        context.unconfigure()
+        glfw.destroy_window(window)
+
+        # allow proper cleanup (workaround for glfw bug)
+        end_time = time.perf_counter() + 0.1
+        while time.perf_counter() < end_time:
+            glfw.wait_events_timeout(end_time - time.perf_counter())
+
+
+if __name__ == "__main__":
+    loop = Loop()
+    loop.run()
diff --git a/examples/tests/test_examples.py b/examples/tests/test_examples.py
index 8f248b24..2cffb502 100644
--- a/examples/tests/test_examples.py
+++ b/examples/tests/test_examples.py
@@ -79,7 +79,8 @@ def test_examples_screenshots(
     def unload_module():
         del sys.modules[module_name]
 
-    request.addfinalizer(unload_module)
+    if request:
+        request.addfinalizer(unload_module)
 
     if not hasattr(example, "canvas"):
         # some examples we screenshot test don't have a canvas as a global variable when imported,
@@ -188,4 +189,4 @@ def test_examples_run(module, force_offscreen):
     os.environ["RENDERCANVAS_FORCE_OFFSCREEN"] = "true"
     pytest.getoption = lambda x: False
     is_lavapipe = True
-    test_examples_screenshots("validate_volume", pytest, None, None)
+    test_examples_screenshots("cube", pytest, mock_time, None, None)
diff --git a/pyproject.toml b/pyproject.toml
index 3c6ec884..dfc43d3c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,6 @@ requires-python = ">= 3.10"
 dependencies = [
     "cffi>=1.15.0",
     "rubicon-objc>=0.4.1; sys_platform == 'darwin'",
-    "sniffio",
     "rendercanvas >=2.4",  # Temporarily depend on rendercanvas because we re-aligned apis. Remove in a few months
 ]
diff --git a/tests/test_api.py b/tests/test_api.py
index cd572ccf..0c32e6b9 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -100,7 +100,7 @@ def test_enums_and_flags_and_structs():
 
 def test_base_wgpu_api():
     # Fake a device and an adapter
-    adapter = wgpu.GPUAdapter(None, set(), {}, wgpu.GPUAdapterInfo({}), None)
+    adapter = wgpu.GPUAdapter(None, set(), {}, wgpu.GPUAdapterInfo({}))
     queue = wgpu.GPUQueue("", None, None)
     device = wgpu.GPUDevice("device08", -1, adapter, {42, 43}, {}, queue)
 
diff --git a/tests/test_async.py b/tests/test_async.py
index f145e3cb..efc99307 100644
--- a/tests/test_async.py
+++ b/tests/test_async.py
@@ -1,14 +1,22 @@
+import sys
 import time
+import types
+import asyncio
 import threading
+
+import trio
 import anyio
-
 from pytest import mark, raises
+from rendercanvas.raw import RawLoop
 
 import wgpu.utils
 from testutils import can_use_wgpu_lib, run_tests
 from wgpu import GPUDevice, MapMode, TextureFormat
-from wgpu._async import GPUPromise as BaseGPUPromise
+from wgpu._async import (
+    GPUPromise as BaseGPUPromise,
+    detect_current_call_soon_threadsafe,
+    detect_current_async_lib,
+)
 
 
 class GPUPromise(BaseGPUPromise):
@@ -27,7 +35,7 @@ def __init__(self):
         self._pending_calls = []
         self.errors = []
 
-    def call_soon_threadsafe(self, f, *args):
+    def call_soon(self, f, *args):  # and it's threadsafe
         self._pending_calls.append((f, args))
 
     def process_events(self):
@@ -79,6 +87,171 @@ def test_promise_basics():
     assert "rejected" in repr(promise)
 
 
+# %%%%% Low level
+
+
+def test_async_low_level_none():
+    flag = []
+
+    flag.append(detect_current_async_lib())
+    flag.append(detect_current_call_soon_threadsafe())
+
+    assert flag[0] is None
+    assert flag[1] is None
+
+
+def test_async_low_level_rendercanvas_asyncadapter():
+    loop = RawLoop()
+
+    flag = []
+
+    async def task():
+        # Our methods
+        flag.append(detect_current_async_lib())
+        flag.append(detect_current_call_soon_threadsafe())
+        # Test that the fast-path works
+        flag.append(sys.get_asyncgen_hooks()[0].__self__.call_soon_threadsafe)
+        loop.stop()
+
+    loop.add_task(task)
+    loop.run()
+
+    assert flag[0] == "rendercanvas.utils.asyncadapter"
+    assert callable(flag[1])
+    assert flag[1].__name__ == "call_soon_threadsafe"
+    assert flag[1].__func__ is flag[2].__func__
+
+
+def test_async_low_level_asyncio():
+    flag = []
+
+    async def task():
+        # Our methods
+        flag.append(detect_current_async_lib())
+        flag.append(detect_current_call_soon_threadsafe())
+        # Test that the fast-path works
+        flag.append(sys.get_asyncgen_hooks()[0].__self__.call_soon_threadsafe)
+
+    asyncio.run(task())
+
+    assert flag[0] == "asyncio"
+    assert callable(flag[1])
+    assert flag[1].__name__ == "call_soon_threadsafe"
+    assert flag[1].__func__ is flag[2].__func__
+
+
+def test_async_low_level_trio():
+    flag = []
+
+    async def task():
+        flag.append(detect_current_async_lib())
+        flag.append(detect_current_call_soon_threadsafe())
+
+    trio.run(task)
+
+    assert flag[0] == "trio"
+    assert callable(flag[1])
+    assert flag[1].__name__ == "run_sync_soon"
+
+
+def test_async_low_level_custom1():
+    # Simplest custom approach. Detection at module level.
+
+    mod = types.ModuleType("wgpu_async_test_module")
+    sys.modules[mod.__name__] = mod
+    code = """if True:
+
+    def call_soon_threadsafe(callback):
+        pass
+
+    def fake_asyncgen_hook(agen):
+        pass
+    """
+    exec(code, mod.__dict__)
+
+    flag = []
+
+    old_hooks = sys.get_asyncgen_hooks()
+    sys.set_asyncgen_hooks(mod.fake_asyncgen_hook)
+
+    try:
+        flag.append(detect_current_async_lib())
+        flag.append(detect_current_call_soon_threadsafe())
+    finally:
+        sys.set_asyncgen_hooks(*old_hooks)
+
+    assert flag[0] == mod.__name__
+    assert flag[1] is mod.call_soon_threadsafe
+
+
+def test_async_low_level_custom2():
+    # Even better, call_soon_threadsafe is an attr of the same object that the asyncgen hook is a method of.
+    # This takes the fast path!
+
+    mod = types.ModuleType("wgpu_async_test_module")
+    sys.modules[mod.__name__] = mod
+    code = """if True:
+
+    class Loop:
+        def call_soon_threadsafe(callback):
+            pass
+
+        def fake_asyncgen_hook(agen):
+            pass
+    loop = Loop()
+    """
+    exec(code, mod.__dict__)
+
+    flag = []
+
+    old_hooks = sys.get_asyncgen_hooks()
+    sys.set_asyncgen_hooks(mod.loop.fake_asyncgen_hook)
+
+    try:
+        flag.append(detect_current_async_lib())
+        flag.append(detect_current_call_soon_threadsafe())
+    finally:
+        sys.set_asyncgen_hooks(*old_hooks)
+
+    assert flag[0] == mod.__name__
+    assert flag[1].__func__ is mod.loop.call_soon_threadsafe.__func__
+
+
+def test_async_low_level_custom3():
+    # The somewhat longer route. This is also the fallback for asyncio,
+    # in case they change something that kills the fast-path for asyncio.
+    # (the fast path being sys.get_asyncgen_hooks()[0].__self__.call_soon_threadsafe)
+
+    mod = types.ModuleType("wgpu_async_test_module")
+    sys.modules[mod.__name__] = mod
+    code = """if True:
+
+    def fake_asyncgen_hook(agen):
+        pass
+    def get_running_loop():
+        return loop
+    class Loop:
+        def call_soon_threadsafe(callback):
+            pass
+    loop = Loop()
+    """
+    exec(code, mod.__dict__)
+
+    flag = []
+
+    old_hooks = sys.get_asyncgen_hooks()
+    sys.set_asyncgen_hooks(mod.fake_asyncgen_hook)
+
+    try:
+        flag.append(detect_current_async_lib())
+        flag.append(detect_current_call_soon_threadsafe())
+    finally:
+        sys.set_asyncgen_hooks(*old_hooks)
+
+    assert flag[0] == mod.__name__
+    assert flag[1].__func__ is mod.loop.call_soon_threadsafe.__func__
+
+
 # %%%%% Promise using sync_wait
 
 
@@ -212,7 +385,7 @@ def poller():
 
 async def test_promise_async_loop_simple():
     loop = SillyLoop()
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
 
     loop.process_events()
     result = await promise
@@ -226,7 +399,7 @@ async def test_promise_async_loop_normal():
     def handler(input):
         return input * 2
 
-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _call_soon_threadsafe=loop.call_soon)
 
     loop.process_events()
     result = await promise
@@ -240,7 +413,7 @@ async def test_promise_async_loop_fail2():
     def handler(input):
         return input / 0
 
-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _call_soon_threadsafe=loop.call_soon)
 
     loop.process_events()
     with raises(ZeroDivisionError):
@@ -272,7 +445,7 @@ def callback(r):
         nonlocal result
         result = r
 
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
     promise.then(callback)
 
     loop.process_events()
@@ -291,7 +464,7 @@ def callback(r):
     def handler(input):
         return input * 2
 
-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _call_soon_threadsafe=loop.call_soon)
     promise.then(callback)
 
     loop.process_events()
@@ -315,7 +488,7 @@ def err_callback(e):
     def handler(input):
         return input / 0
 
-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _call_soon_threadsafe=loop.call_soon)
     promise.then(callback, err_callback)
 
     loop.process_events()
@@ -323,7 +496,7 @@ def handler(input):
     assert isinstance(error, ZeroDivisionError)
 
 
-# %%%%% Chainging
+# %%%%% Chaining
 
 
 def test_promise_chaining_basic():
@@ -338,7 +511,7 @@ def callback1(r):
         nonlocal result
         result = r
 
-    promise = MyPromise("test", None, loop=loop)
+    promise = MyPromise("test", None, _call_soon_threadsafe=loop.call_soon)
     p = promise.then(callback1)
 
     loop.process_events()
@@ -371,7 +544,7 @@ def callback3(r):
         nonlocal result
         result = r
 
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
     p = promise.then(callback1).then(callback2).then(callback3)
     assert isinstance(p, GPUPromise)
 
@@ -400,7 +573,7 @@ def err_callback(e):
         nonlocal error
         error = e
 
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
     p = promise.then(callback1).then(callback2).then(callback3, err_callback)
     assert isinstance(p, GPUPromise)
 
@@ -430,7 +603,7 @@ def err_callback(e):
         nonlocal error
         error = e
 
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
     p = promise.then(callback1).then(callback2).then(callback3, err_callback)
     assert isinstance(p, GPUPromise)
 
@@ -454,7 +627,7 @@ def callback2(r):
     def callback3(r):
         results.append(r * 3)
 
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
 
     promise.then(callback1)
     promise.then(callback2)
@@ -473,7 +646,7 @@ def test_promise_chaining_after_resolve():
     def callback1(r):
         results.append(r)
 
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
 
     # Adding handler has no result, because promise is not yet resolved.
     promise.then(callback1)
 
@@ -503,16 +676,16 @@ def test_promise_chaining_with_promises():
     result = None
 
     def callback1(r):
-        return GPUPromise("test", lambda _: r * 3, loop=loop)
+        return GPUPromise("test", lambda _: r * 3, _call_soon_threadsafe=loop.call_soon)
 
     def callback2(r):
-        return GPUPromise("test", lambda _: r + 2, loop=loop)
+        return GPUPromise("test", lambda _: r + 2, _call_soon_threadsafe=loop.call_soon)
 
     def callback3(r):
         nonlocal result
         result = r
 
-    promise = GPUPromise("test", None, loop=loop)
+    promise = GPUPromise("test", None, _call_soon_threadsafe=loop.call_soon)
     p = promise.then(callback1).then(callback2).then(callback3)
     assert isinstance(p, GPUPromise)
 
@@ -535,7 +708,7 @@ def test_promise_decorator():
     def handler(input):
         return input * 2
 
-    promise = GPUPromise("test", handler, loop=loop)
+    promise = GPUPromise("test", handler, _call_soon_threadsafe=loop.call_soon)
 
     @promise
     def decorated(r):
diff --git a/wgpu/_async.py b/wgpu/_async.py
index 81e08f29..b02e804c 100644
--- a/wgpu/_async.py
+++ b/wgpu/_async.py
@@ -7,27 +7,86 @@
 import threading
 from typing import Callable, Awaitable, Generator, Generic, TypeVar
 
-import sniffio
-
 logger = logging.getLogger("wgpu")
 
-# The async_sleep and AsyncEvent are a copy of the implementation in rendercanvas.asyncs
+def detect_current_async_lib():
+    """Get the lib name of the currently active async lib, or None."""
+    ob = sys.get_asyncgen_hooks()[0]
+    if ob is not None:
+        try:
+            libname = ob.__module__.partition(".")[0]
+        except AttributeError:
+            return None
+        if libname == "rendercanvas":
+            libname = "rendercanvas.utils.asyncadapter"
+        elif libname == "pyodide":
+            libname = "asyncio"
+        return libname
+
+
+def detect_current_call_soon_threadsafe():
+    """Get the current applicable call_soon_threadsafe function, or None."""
+
+    # Get asyncgen hook func, return fast when no async loop is active
+    ob = sys.get_asyncgen_hooks()[0]
+    if ob is None:
+        return None
+
+    # Super-fast path that works for loop objects that have call_soon_threadsafe()
+    # and use sys.set_asyncgen_hooks() on a method of the same loop object.
+    # Works with asyncio, rendercanvas' asyncadapter, and also custom (direct) loops.
+    try:
+        return ob.__self__.call_soon_threadsafe
+    except AttributeError:
+        pass
+
+    # Otherwise, check the module name
+    try:
+        libname = ob.__module__.partition(".")[0]
+    except AttributeError:
+        return None
+
+    if libname == "trio":
+        # Still pretty fast for trio
+        trio = sys.modules[libname]
+        token = trio.lowlevel.current_trio_token()
+        return token.run_sync_soon
+    else:
+        # Ok, it looks like there is an async loop that we don't know. Try harder to get the func.
+        # This is also a fallback for asyncio (in case the ob.__self__ stops working).
+        # Note: we have a unit test for the asyncio fast-path, so we will know when we need to update,
+        # but the code below makes sure that it keeps working regardless (just a tiny bit slower).
+        if libname == "pyodide":
+            libname = "asyncio"
+        mod = sys.modules.get(libname, None)
+        if mod is None:
+            return None
+        try:
+            return mod.call_soon_threadsafe
+        except AttributeError:
+            pass
+        try:
+            return mod.get_running_loop().call_soon_threadsafe
+        except Exception:  # (RuntimeError, AttributeError) but accept any error
+            pass
 
 
 async def async_sleep(delay):
-    """Async sleep that uses sniffio to be compatible with asyncio, trio, rendercanvas.utils.asyncadapter, and possibly more."""
-    libname = sniffio.current_async_library()
+    """Async sleep that works with asyncio, trio, and rendercanvas' asyncadapter."""
+    # Note that we get the regular lib's sleep(), not the high-precision sleep from rendercanvas.asyncs.sleep
+    # Anyway, we can remove this once we can assume we have rendercanvas with https://github.com/pygfx/rendercanvas/pull/151
+    libname = detect_current_async_lib()
     sleep = sys.modules[libname].sleep
     await sleep(delay)
 
 
 class AsyncEvent:
-    """Generic async event object using sniffio. Works with trio, asyncio and rendercanvas-native."""
+    """Async Event object that works with asyncio, trio, and rendercanvas' asyncadapter."""
 
     def __new__(cls):
-        libname = sniffio.current_async_library()
+        libname = detect_current_async_lib()
         Event = sys.modules[libname].Event  # noqa
         return Event()
 
@@ -35,16 +94,6 @@ def __new__(cls):
 AwaitedType = TypeVar("AwaitedType")
 
 
-class LoopInterface:
-    """A loop object must have (at least) this API.
-
-    Rendercanvas loop objects do, asyncio.loop does too.
-    """
-
-    def call_soon(self, callback: Callable, *args: object):
-        raise NotImplementedError()
-
-
 def get_backoff_time_generator() -> Generator[float, None, None]:
     """Generates sleep-times, start at 0 then increasing to 100Hz and sticking there."""
     for _ in range(5):
@@ -88,24 +137,20 @@ def __init__(
         title: str,
         handler: Callable | None,
         *,
-        loop: LoopInterface | None = None,
         keepalive: object = None,
+        _call_soon_threadsafe: object = None,  # for testing and chaining
     ):
         """
        Arguments:
            title (str): The title of this promise, mostly for debugging purposes.
            handler (callable, optional): The function to turn promise input into the result.
                If None, the result will simply be the input.
-            loop (LoopInterface, optional): A loop object that at least has a ``call_soon()`` method.
-                If not given, this promise does not support .then() or promise-chaining.
            keepalive (object, optional): Pass any data via this arg who's lifetime must be bound
                to the resolving of this promise.
        """
         self._title = str(title)  # title for debugging
         self._handler = handler  # function to turn input into the result
-
-        self._loop = loop  # Event loop instance, can be None
         self._keepalive = keepalive  # just to keep something alive
 
         self._state = "pending"  # "pending", "pending-rejected", "pending-fulfilled", "rejected", "fulfilled"
@@ -117,6 +162,11 @@ def __init__(
         self._error_callbacks = []
         self._UNRESOLVED.add(self)
 
+        # Set call_soon_threadsafe function, may be None, in which case we cannot do then() or await.
+        self._call_soon_threadsafe = (
+            _call_soon_threadsafe or detect_current_call_soon_threadsafe()
+        )
+
     def __repr__(self):
         return f""
 
@@ -139,8 +189,10 @@ def _set_input(self, result: object, *, resolve_now=True) -> None:
         # If the input is a promise, we need to wait for it, i.e. chain to self.
         if isinstance(result, GPUPromise):
-            if self._loop is None:
-                self._set_error("Cannot chain GPUPromise if the loop is not set.")
+            if self._call_soon_threadsafe is None:
+                self._set_error(
+                    "Cannot chain GPUPromise because no running loop could be detected."
+                )
             else:
                 result._chain(self)
             return
@@ -188,8 +240,8 @@ def _set_pending_resolved(self, *, resolve_now=False):
             self._resolve_callback()
         if self._async_event is not None:
             self._async_event.set()
-        elif self._loop is not None:
-            self._loop.call_soon_threadsafe(self._resolve_callback)
+        elif self._call_soon_threadsafe is not None:
+            self._call_soon_threadsafe(self._resolve_callback)
 
     def _resolve_callback(self):
         # This should only be called in the main/reference thread.
@@ -197,9 +249,12 @@ def _resolve_callback(self):
         # Allow tasks that await this promise to continue.
         if self._async_event is not None:
             self._async_event.set()
-        # The callback may already be resolved
+        # If the value is set, let's resolve it so the handlers get called, but swallow the promise's value/failure here.
         if self._state.startswith("pending-"):
-            self._resolve()
+            try:
+                self._resolve()
+            except Exception:
+                pass
 
     def _resolve(self):
         """Finalize the promise, by calling the handler to get the result, and then invoking callbacks."""
@@ -218,11 +273,11 @@ def _resolve(self):
         if self._state.endswith("rejected"):
             error = self._value
             for cb in self._error_callbacks:
-                self._loop.call_soon_threadsafe(cb, error)
+                self._call_soon_threadsafe(cb, error)
         elif self._state.endswith("fulfilled"):
             result = self._value
             for cb in self._done_callbacks:
-                self._loop.call_soon_threadsafe(cb, result)
+                self._call_soon_threadsafe(cb, result)
         # New state
         self._state = self._state.replace("pending-", "")
         # Clean up
@@ -253,7 +308,7 @@ def sync_wait(self) -> AwaitedType:
 
     def _sync_wait(self):
         # Each subclass may implement this in its own way. E.g. it may wait for
-        # the _thread_event, it may poll the device in a loop while checking the
+        # the _thread_event, it may poll the device in a while-loop while checking the
         # status, and Pyodide may use its special logic to sync wait the JS
         # promise.
         raise NotImplementedError()
@@ -275,8 +330,10 @@ def then(
 
         The callback will receive one argument: the result of the promise.
         """
-        if self._loop is None:
-            raise RuntimeError("Cannot use GPUPromise.then() if the loop is not set.")
+        if self._call_soon_threadsafe is None:
+            raise RuntimeError(
+                "Cannot use GPUPromise.then() because no running loop could be detected."
+            )
         if not callable(callback):
             raise TypeError(
                 f"GPUPromise.then() got a callback that is not callable: {callback!r}"
@@ -293,7 +350,9 @@ def then(
         title = self._title + " -> " + callback_name
 
         # Create new promise
-        new_promise = self.__class__(title, callback, loop=self._loop)
+        new_promise = self.__class__(
+            title, callback, _call_soon_threadsafe=self._call_soon_threadsafe
+        )
         self._chain(new_promise)
 
         if error_callback is not None:
@@ -306,8 +365,10 @@ def catch(self, callback: Callable[[Exception], None] | None):
 
         The callback will receive one argument: the error object.
         """
-        if self._loop is None:
-            raise RuntimeError("Cannot use GPUPromise.catch() if the loop is not set.")
+        if self._call_soon_threadsafe is None:
+            raise RuntimeError(
+                "Cannot use GPUPromise.catch() because no running loop could be detected."
+            )
         if not callable(callback):
             raise TypeError(
                 f"GPUPromise.catch() got a callback that is not callable: {callback!r}"
@@ -317,7 +378,9 @@ def catch(self, callback: Callable[[Exception], None] | None):
         title = "Catcher for " + self._title
 
         # Create new promise
-        new_promise = self.__class__(title, callback, loop=self._loop)
+        new_promise = self.__class__(
+            title, callback, _call_soon_threadsafe=self._call_soon_threadsafe
+        )
 
         # Custom chain
         with self._lock:
@@ -328,8 +391,9 @@ def catch(self, callback: Callable[[Exception], None] | None):
         return new_promise
 
     def __await__(self):
-        if self._loop is None:
-            # An async busy loop
+        if self._call_soon_threadsafe is None:
+            # An async busy loop. In theory we should be able to remove this code, but it helps make the transition
+            # simpler, since then we depend less on https://github.com/pygfx/rendercanvas/pull/151
             async def awaiter():
                 if self._state == "pending":
                     # Do small incremental async naps. Other tasks and threads can run.
diff --git a/wgpu/_classes.py b/wgpu/_classes.py
index e1b41259..83e263a9 100644
--- a/wgpu/_classes.py
+++ b/wgpu/_classes.py
@@ -14,7 +14,7 @@
 import logging
 from typing import Sequence
 
-from ._async import GPUPromise as BaseGPUPromise, LoopInterface
+from ._async import GPUPromise as BaseGPUPromise
 from ._coreutils import ApiDiff, str_flag_to_int, ArrayLike, CanvasLike
 from ._diagnostics import diagnostics, texture_format_to_bpp
 from . import flags, enums, structs
@@ -119,7 +119,6 @@ def request_adapter_async(
         power_preference: enums.PowerPreferenceEnum | None = None,
         force_fallback_adapter: bool = False,
         canvas: CanvasLike | None = None,
-        loop: LoopInterface | None = None,
     ) -> GPUPromise[GPUAdapter]:
         """Create a `GPUAdapter`, the object that represents an abstract wgpu
         implementation, from which one can request a `GPUDevice`.
@@ -132,8 +131,6 @@ def request_adapter_async(
                 fallback adapter.
             canvas : The canvas or context that the adapter should be able to render to. This can typically
                 be left to None. If given, it must be a ``GPUCanvasContext`` or ``RenderCanvas``.
-            loop : the loop object for async support. Must have at least ``call_soon(f, *args)``.
-                The loop object is required for asynchrouns use with ``promise.then()``. EXPERIMENTAL.
         """
         # If this method gets called, no backend has been loaded yet, let's do that now!
         from .backends.auto import gpu
 
@@ -145,7 +142,6 @@ def request_adapter_async(
             power_preference=power_preference,
             force_fallback_adapter=force_fallback_adapter,
             canvas=canvas,
-            loop=loop,
         )
 
     @apidiff.add("Method useful for multi-gpu environments")
@@ -158,9 +154,7 @@ def enumerate_adapters_sync(self) -> list[GPUAdapter]:
         return promise.sync_wait()
 
     @apidiff.add("Method useful for multi-gpu environments")
-    def enumerate_adapters_async(
-        self, *, loop: LoopInterface | None = None
-    ) -> GPUPromise[list[GPUAdapter]]:
+    def enumerate_adapters_async(self) -> GPUPromise[list[GPUAdapter]]:
         """Get a list of adapter objects available on the current system.
 
         An adapter can then be selected (e.g. using its summary), and a device
@@ -187,7 +181,7 @@ def enumerate_adapters_async(
         # If this method gets called, no backend has been loaded yet, let's do that now!
         from .backends.auto import gpu
 
-        return gpu.enumerate_adapters_async(loop=loop)
+        return gpu.enumerate_adapters_async()
 
     # IDL: GPUTextureFormat getPreferredCanvasFormat();
     @apidiff.change("Disabled because we put it on the canvas context")
@@ -544,10 +538,9 @@ class GPUAdapter:
 
     _ot = object_tracker
 
-    def __init__(self, internal, features, limits, adapter_info, loop):
+    def __init__(self, internal, features, limits, adapter_info):
         self._ot.increase(self.__class__.__name__)
         self._internal = internal
-        self._loop = loop
 
         assert isinstance(features, set)
         assert isinstance(limits, dict)
@@ -693,7 +686,6 @@ def __init__(self, label, internal, adapter, features, limits, queue):
         self._adapter = adapter
         self._features = features
         self._limits = limits
-        self._loop = adapter._loop
         self._queue = queue
         queue._device = self  # because it could not be set earlier
 
diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py
index a8019b39..59ab1c20 100644
--- a/wgpu/backends/wgpu_native/_api.py
+++ b/wgpu/backends/wgpu_native/_api.py
@@ -23,7 +23,6 @@
 from weakref import WeakKeyDictionary
 from typing import NoReturn, Sequence
 
-from ..._async import LoopInterface
 from ..._coreutils import str_flag_to_int, ArrayLike, CanvasLike
 from ... import classes, flags, enums, structs
 
@@ -462,7 +461,6 @@ def request_adapter_async(
         power_preference: enums.PowerPreferenceEnum | None = None,
         force_fallback_adapter: bool = False,
         canvas: CanvasLike | None = None,
-        loop: LoopInterface | None = None,
     ) -> GPUPromise[GPUAdapter]:
         """Create a `GPUAdapter`, the object that represents an abstract wgpu
         implementation, from which one can request a `GPUDevice`.
@@ -486,11 +484,11 @@ def request_adapter_async(
         # We chose the variable name WGPUPY_WGPU_ADAPTER_NAME instead WGPU_ADAPTER_NAME
         # to avoid a clash
         if adapter_name := os.getenv(("WGPUPY_WGPU_ADAPTER_NAME")):
-            adapters = self._enumerate_adapters(loop)
+            adapters = self._enumerate_adapters()
             adapters = [a for a in adapters if adapter_name in a.summary]
             if not adapters:
                 raise ValueError(f"Adapter with name '{adapter_name}' not found.")
-            promise = GPUPromise("adapter by name", None, loop=loop)
+            promise = GPUPromise("adapter by name", None)
             promise._wgpu_set_input(adapters[0])
             return promise
 
@@ -563,13 +561,10 @@ def request_adapter_callback(status, result, c_message, _userdata1, _userdata2):
         )
 
         def handler(adapter_id):
-            return self._create_adapter(adapter_id, loop)
+            return self._create_adapter(adapter_id)
 
         promise = GPUPromise(
-            "request_adapter",
-            handler,
-            loop=loop,
-            keepalive=request_adapter_callback,
+            "request_adapter", handler, keepalive=request_adapter_callback
         )
 
         instance = get_wgpu_instance()
@@ -587,20 +582,18 @@ def handler(adapter_id):
 
         return promise
 
-    def enumerate_adapters_async(
-        self, *, loop: LoopInterface | None = None
-    ) -> GPUPromise[list[GPUAdapter]]:
+    def enumerate_adapters_async(self) -> GPUPromise[list[GPUAdapter]]:
         """Get a list of adapter objects available on the current system.
 
         This is the implementation based on wgpu-native.
         """
-        result = self._enumerate_adapters(loop)
+        result = self._enumerate_adapters()
         # We already have the result, so we return a resolved promise.
         # The reason this is async is to allow this to work on backends where we cannot actually enumerate adapters.
-        promise = GPUPromise("enumerate_adapters", None, loop=loop)
+        promise = GPUPromise("enumerate_adapters", None)
         promise._wgpu_set_input(result)
         return promise
 
-    def _enumerate_adapters(self, loop) -> list[GPUAdapter]:
+    def _enumerate_adapters(self) -> list[GPUAdapter]:
         # The first call is to get the number of adapters, and the second call
         # is to get the actual adapters. Note that the second arg (now NULL) can
         # be a `WGPUInstanceEnumerateAdapterOptions` to filter by backend.
@@ -610,9 +603,9 @@ def _enumerate_adapters(self, loop) -> list[GPUAdapter]:
         adapters = new_array("WGPUAdapter[]", count)
         # H: size_t f(WGPUInstance instance, WGPUInstanceEnumerateAdapterOptions const * options, WGPUAdapter * adapters)
         libf.wgpuInstanceEnumerateAdapters(instance, ffi.NULL, adapters)
-        return [self._create_adapter(adapter, loop) for adapter in adapters]
+        return [self._create_adapter(adapter) for adapter in adapters]
 
-    def _create_adapter(self, adapter_id, loop):
+    def _create_adapter(self, adapter_id):
         # ----- Get adapter info
 
         # H: nextInChain: WGPUChainedStructOut *, vendor: WGPUStringView, architecture: WGPUStringView, device: WGPUStringView, description: WGPUStringView, backendType: WGPUBackendType, adapterType: WGPUAdapterType, vendorID: int, deviceID: int
@@ -671,7 +664,7 @@ def to_py_str(key):
         features = _get_features(adapter_id, adapter=True)
 
         # ----- Done
-        return GPUAdapter(adapter_id, features, limits, adapter_info, loop)
+        return GPUAdapter(adapter_id, features, limits, adapter_info)
 
     def get_canvas_context(self, present_info: dict) -> GPUCanvasContext:
         """Get the GPUCanvasContext object for the appropriate backend.
@@ -1390,10 +1383,7 @@ def handler(device_id):
             return device
 
         promise = GPUPromise(
-            "request_device",
-            handler,
-            loop=self._loop,
-            keepalive=request_device_callback,
+            "request_device", handler, keepalive=request_device_callback
         )
 
         # H: WGPUFuture f(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallbackInfo callbackInfo)
@@ -1972,9 +1962,7 @@ def create_compute_pipeline_async(
         # H: WGPUComputePipeline f(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor)
         id = libf.wgpuDeviceCreateComputePipeline(self._internal, descriptor)
         result = GPUComputePipeline(label, id, self)
-        promise = GPUPromise(
-            "create_compute_pipeline_async", None, loop=self._device._loop
-        )
+        promise = GPUPromise("create_compute_pipeline_async", None)
         promise._wgpu_set_input(result)
         return promise
 
@@ -2006,12 +1994,7 @@ def callback(status, result, c_message, _userdata1, _userdata2):
         def handler(id):
             return GPUComputePipeline(label, id, self)
 
-        promise = GPUPromise(
-            "create_compute_pipeline",
-            handler,
-            loop=self._loop,
-            keepalive=callback,
-        )
+        promise = GPUPromise("create_compute_pipeline", handler, keepalive=callback)
 
         token = self._device._poller.get_token()
 
@@ -2101,9 +2084,7 @@ def create_render_pipeline_async(
         # H: WGPURenderPipeline f(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor)
         id = libf.wgpuDeviceCreateRenderPipeline(self._internal, descriptor)
         result = GPURenderPipeline(label, id, self)
-        promise = GPUPromise(
-            "create_render_pipeline_async", None, loop=self._device._loop
-        )
+        promise = GPUPromise("create_render_pipeline_async", None)
         promise._wgpu_set_input(result)
         return promise
 
@@ -2133,12 +2114,7 @@ def callback(status, result, c_message, _userdata1, _userdata2):
         def handler(id):
             return GPURenderPipeline(label, id, self)
 
-        promise = GPUPromise(
-            "create_render_pipeline",
-            handler,
-            loop=self._loop,
-            keepalive=callback,
-        )
+        promise = GPUPromise("create_render_pipeline", handler, keepalive=callback)
 
         token = self._device._poller.get_token()
 
@@ -2569,7 +2545,7 @@ def map_async(
 
         # Can we even map?
         if self._map_state != enums.BufferMapState.unmapped:
-            promise = GPUPromise("buffer.map", None, loop=self._device._loop)
+            promise = GPUPromise("buffer.map", None)
             promise._wgpu_set_error(
                 RuntimeError(
                     f"Can only map a buffer if its currently unmapped, not {self._map_state!r}"
@@ -2613,12 +2589,7 @@ def handler(_status):
             self._mapped_status = offset, offset + size, mode
             self._mapped_memoryviews = []
 
-        promise = GPUPromise(
-            "buffer.map",
-            handler,
-            loop=self._device._loop,
-            keepalive=buffer_map_callback,
-        )
+        promise = GPUPromise("buffer.map", handler, keepalive=buffer_map_callback)
 
         token = self._device._poller.get_token()
 
@@ -2887,7 +2858,7 @@ def get_compilation_info_async(self) -> GPUPromise[GPUCompilationInfo]:
         result = []
 
         # Return a resolved promise
-        promise = GPUPromise("get_compilation_info", None, loop=self._device._loop)
+        promise = GPUPromise("get_compilation_info", None)
         promise._wgpu_set_input(result)
         return promise
 
@@ -4189,10 +4160,7 @@ def handler(_value):
             return None
 
         promise = GPUPromise(
-            "on_submitted_work_done",
-            handler,
-            loop=self._device._loop,
-            keepalive=work_done_callback,
+            "on_submitted_work_done", handler, keepalive=work_done_callback
        )
 
         token = self._device._poller.get_token()
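
For reference, below is a minimal, stdlib-only sketch of the detection fast path that this changeset relies on (the same pattern exercised by test_async_low_level_custom2 and by examples/gui_direct2.py): a custom loop registers one of its own bound methods as the asyncgen firstiter hook, so the loop's call_soon_threadsafe can be recovered via the hook's __self__. The MiniLoop name and this script are illustrative only, not part of the diff.

import sys


class MiniLoop:
    """A tiny stand-in for an application event loop (illustrative only)."""

    def __init__(self):
        self._pending = []

    def _asyncgen_hook(self, agen):
        # Stub firstiter hook; it exists only so the loop can be discovered.
        pass

    def call_soon_threadsafe(self, callback):
        # Queue a callback to be run later by the loop's own iteration.
        self._pending.append(callback)

    def run_pending(self):
        while self._pending:
            self._pending.pop(0)()


loop = MiniLoop()
# Register a bound method of the loop as the asyncgen hook ...
sys.set_asyncgen_hooks(loop._asyncgen_hook)

# ... so the fast path (sys.get_asyncgen_hooks()[0].__self__.call_soon_threadsafe) works:
hook = sys.get_asyncgen_hooks()[0]
call_soon = hook.__self__.call_soon_threadsafe
assert call_soon.__func__ is MiniLoop.call_soon_threadsafe

call_soon(lambda: print("resolved on the loop"))
loop.run_pending()
sys.set_asyncgen_hooks(None, None)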