// skia_safe/gpu/ganesh/direct_context.rs

1use std::{
2    fmt,
3    ops::{Deref, DerefMut},
4    ptr,
5    time::Duration,
6};
7
8use crate::{
9    gpu::{
10        BackendFormat, BackendRenderTarget, BackendTexture, ContextOptions, FlushInfo,
11        GpuStatsFlags, MutableTextureState, PurgeResourceOptions, RecordingContext,
12        SemaphoresSubmitted, SubmitInfo, SyncCpu,
13    },
14    prelude::*,
15    surfaces, Data, Image, Surface, TextureCompressionType,
16};
17use skia_bindings::{self as sb, GrDirectContext, GrDirectContext_DirectContextID, SkRefCntBase};
18
/// Opaque identifier of a [`DirectContext`], obtained via [`DirectContext::id()`].
///
/// `#[repr(C)]` is required so the type is layout-compatible with the native
/// `GrDirectContext::DirectContextID` (see `native_transmutable!` below).
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct DirectContextId {
    id: u32,
}

// Asserts layout compatibility and implements the conversions between the
// native `GrDirectContext_DirectContextID` and the Rust-side `DirectContextId`.
native_transmutable!(GrDirectContext_DirectContextID, DirectContextId);
26
27pub type DirectContext = RCHandle<GrDirectContext>;
28
// Enables `RCHandle<GrDirectContext>`: reference counting is performed through
// the native type's `SkRefCntBase` base class.
impl NativeRefCountedBase for GrDirectContext {
    type Base = SkRefCntBase;
}
32
/// A [`DirectContext`] can be used everywhere a [`RecordingContext`] is
/// expected, mirroring the C++ inheritance `GrDirectContext :
/// GrRecordingContext`.
impl Deref for DirectContext {
    type Target = RecordingContext;

    fn deref(&self) -> &Self::Target {
        // SAFETY: both handle types are transparent wrappers around the same
        // native pointer, and `GrDirectContext` derives from
        // `GrRecordingContext` on the C++ side, so reinterpreting the
        // reference is sound.
        unsafe { transmute_ref(self) }
    }
}
40
impl DerefMut for DirectContext {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: same layout/inheritance argument as in the `Deref` impl for
        // this type; mutable access does not change the reasoning.
        unsafe { transmute_ref_mut(self) }
    }
}
46
/// Limits for the GPU resource cache, applied with
/// [`DirectContext::set_resource_cache_limits()`].
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct ResourceCacheLimits {
    /// Maximum number of resources the cache may hold.
    pub max_resources: usize,
    /// Maximum total byte size of cached resources.
    pub max_resource_bytes: usize,
}
52
/// Current GPU resource cache usage, as reported by
/// [`DirectContext::resource_cache_usage()`].
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct ResourceCacheUsage {
    /// Number of resources currently held by the cache.
    pub resource_count: usize,
    /// Total byte size of the resources currently held.
    pub resource_bytes: usize,
}
58
impl fmt::Debug for DirectContext {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("DirectContext")
            // Includes the `RecordingContext` portion via deref coercion.
            .field("base", self as &RecordingContext)
            .field("resource_cache_limit", &self.resource_cache_limit())
            .field("resource_cache_usage", &self.resource_cache_usage())
            .field(
                "resource_cache_purgeable_bytes",
                &self.resource_cache_purgeable_bytes(),
            )
            .field(
                "supports_distance_field_text",
                &self.supports_distance_field_text(),
            )
            .finish()
    }
}
76
impl DirectContext {
    /// Creates a GL-backed [`DirectContext`].
    ///
    /// The corresponding factory was removed from Skia itself; this forwards
    /// to [`crate::gpu::direct_contexts::make_gl`].
    #[cfg(feature = "gl")]
    #[deprecated(since = "0.74.0", note = "use gpu::direct_contexts::make_gl()")]
    pub fn new_gl<'a>(
        interface: impl Into<crate::gpu::gl::Interface>,
        options: impl Into<Option<&'a ContextOptions>>,
    ) -> Option<DirectContext> {
        crate::gpu::direct_contexts::make_gl(interface, options)
    }

    /// Creates a Vulkan-backed [`DirectContext`].
    ///
    /// The corresponding factory was removed from Skia itself; this forwards
    /// to [`crate::gpu::direct_contexts::make_vulkan`].
    #[cfg(feature = "vulkan")]
    #[deprecated(since = "0.74.0", note = "use gpu::direct_contexts::make_vulkan()")]
    pub fn new_vulkan<'a>(
        backend_context: &crate::gpu::vk::BackendContext,
        options: impl Into<Option<&'a ContextOptions>>,
    ) -> Option<DirectContext> {
        crate::gpu::direct_contexts::make_vulkan(backend_context, options)
    }

    /// Creates a Metal-backed [`DirectContext`]; forwards to
    /// [`crate::gpu::direct_contexts::make_metal`].
    #[cfg(feature = "metal")]
    #[deprecated(since = "0.74.0", note = "use gpu::direct_contexts::make_metal()")]
    pub fn new_metal<'a>(
        backend_context: &crate::gpu::mtl::BackendContext,
        options: impl Into<Option<&'a ContextOptions>>,
    ) -> Option<DirectContext> {
        crate::gpu::direct_contexts::make_metal(backend_context, options)
    }

    /// Creates a Direct3D-backed [`DirectContext`]. Returns `None` when the
    /// native factory fails.
    ///
    /// # Safety
    ///
    /// The caller must ensure `backend_context` refers to valid, live D3D
    /// objects for the lifetime of the returned context.
    #[cfg(feature = "d3d")]
    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn new_d3d<'a>(
        backend_context: &crate::gpu::d3d::BackendContext,
        options: impl Into<Option<&'a ContextOptions>>,
    ) -> Option<DirectContext> {
        DirectContext::from_ptr(sb::C_GrDirectContext_MakeDirect3D(
            backend_context.native(),
            options.into().native_ptr_or_null(),
        ))
    }

    /// Resets the context's tracking of backend state.
    ///
    /// `backend_state` is a backend-specific bit mask of the state to reset;
    /// `None` resets everything (`kAll_GrBackendState`).
    pub fn reset(&mut self, backend_state: Option<u32>) -> &mut Self {
        unsafe {
            self.native_mut()
                .resetContext(backend_state.unwrap_or(sb::kAll_GrBackendState))
        }
        self
    }

    /// Forwards to the native `resetGLTextureBindings()`, dropping the
    /// context's cached GL texture binding state.
    pub fn reset_gl_texture_bindings(&mut self) -> &mut Self {
        unsafe { self.native_mut().resetGLTextureBindings() }
        self
    }

    /// Abandons the context; it can no longer be used for rendering
    /// afterwards.
    pub fn abandon(&mut self) -> &mut Self {
        unsafe {
            // NOTE(review): calls the raw C wrapper instead of the generated
            // method (`self.native_mut().abandonContext()`) — presumably the
            // generated binding is not directly usable here; confirm before
            // changing.
            sb::GrDirectContext_abandonContext(self.native_mut() as *mut _ as _)
        }
        self
    }

    /// Forwards to the native `isDeviceLost()`. Takes `&mut self` because the
    /// generated binding requires a mutable receiver.
    pub fn is_device_lost(&mut self) -> bool {
        unsafe { self.native_mut().isDeviceLost() }
    }

    // TODO: threadSafeProxy()

    /// Forwards to the native `oomed()`, reporting whether the backend has
    /// signaled an out-of-memory condition.
    pub fn oomed(&mut self) -> bool {
        unsafe { self.native_mut().oomed() }
    }

    /// Releases GPU resources and then abandons the context (native
    /// `releaseResourcesAndAbandonContext()` via the raw C wrapper).
    pub fn release_resources_and_abandon(&mut self) -> &mut Self {
        unsafe {
            sb::GrDirectContext_releaseResourcesAndAbandonContext(self.native_mut() as *mut _ as _)
        }
        self
    }

    /// Returns the resource cache's byte limit.
    pub fn resource_cache_limit(&self) -> usize {
        unsafe { self.native().getResourceCacheLimit() }
    }

    /// Returns the current resource count and total byte usage of the
    /// resource cache.
    pub fn resource_cache_usage(&self) -> ResourceCacheUsage {
        let mut resource_count = 0;
        let mut resource_bytes = 0;
        unsafe {
            self.native()
                .getResourceCacheUsage(&mut resource_count, &mut resource_bytes)
        }
        ResourceCacheUsage {
            // The native out-parameter is a C `int`; a negative value would be
            // a native-side bug, so the conversion failure panics.
            resource_count: resource_count.try_into().unwrap(),
            resource_bytes,
        }
    }

    /// Returns the number of bytes in the resource cache that are currently
    /// purgeable.
    pub fn resource_cache_purgeable_bytes(&self) -> usize {
        unsafe { self.native().getResourceCachePurgeableBytes() }
    }

    /// Applies both cache limits; `max_resources` is converted to the native
    /// C `int` and panics if it does not fit.
    pub fn set_resource_cache_limits(&mut self, limits: ResourceCacheLimits) {
        unsafe {
            self.native_mut().setResourceCacheLimits(
                limits.max_resources.try_into().unwrap(),
                limits.max_resource_bytes,
            )
        }
    }

    /// Sets only the byte limit of the resource cache.
    pub fn set_resource_cache_limit(&mut self, max_resource_bytes: usize) {
        unsafe { self.native_mut().setResourceCacheLimit(max_resource_bytes) }
    }

    /// Frees GPU resources held by the context (native `freeGpuResources()`
    /// via the raw C wrapper).
    pub fn free_gpu_resources(&mut self) -> &mut Self {
        unsafe { sb::GrDirectContext_freeGpuResources(self.native_mut() as *mut _ as _) }
        self
    }

    /// Purges resources that have not been used for at least `not_used`.
    ///
    /// `opts` defaults to [`PurgeResourceOptions::AllResources`]. The duration
    /// is converted to whole milliseconds and panics on overflow.
    pub fn perform_deferred_cleanup(
        &mut self,
        not_used: Duration,
        opts: impl Into<Option<PurgeResourceOptions>>,
    ) -> &mut Self {
        unsafe {
            sb::C_GrDirectContext_performDeferredCleanup(
                self.native_mut(),
                not_used.as_millis().try_into().unwrap(),
                opts.into().unwrap_or(PurgeResourceOptions::AllResources),
            )
        }
        self
    }

    /// Purges up to `bytes_to_purge` bytes of unlocked resources, optionally
    /// preferring scratch resources.
    pub fn purge_unlocked_resource_bytes(
        &mut self,
        bytes_to_purge: usize,
        prefer_scratch_resources: bool,
    ) -> &mut Self {
        unsafe {
            self.native_mut()
                .purgeUnlockedResources(bytes_to_purge, prefer_scratch_resources)
        }
        self
    }

    /// Purges unlocked resources according to `opts`
    /// (`purgeUnlockedResources1` is the bindgen-renamed native overload).
    pub fn purge_unlocked_resources(&mut self, opts: PurgeResourceOptions) -> &mut Self {
        unsafe { self.native_mut().purgeUnlockedResources1(opts) }
        self
    }

    /// Returns the GPU statistics supported by this context; unknown native
    /// bits are dropped by `from_bits_truncate`.
    pub fn supported_gpu_stats(&self) -> GpuStatsFlags {
        GpuStatsFlags::from_bits_truncate(unsafe { self.native().supportedGpuStats() })
    }

    // TODO: wait()

    /// Flushes pending work and submits it to the GPU in one step.
    pub fn flush_and_submit(&mut self) -> &mut Self {
        unsafe { sb::C_GrDirectContext_flushAndSubmit(self.native_mut()) }
        self
    }

    /// Flushes with default options, then submits while synchronizing with
    /// the CPU ([`SyncCpu::Yes`]).
    pub fn flush_submit_and_sync_cpu(&mut self) -> &mut Self {
        self.flush(&FlushInfo::default());
        self.submit(SyncCpu::Yes);
        self
    }

    #[deprecated(since = "0.37.0", note = "Use flush()")]
    pub fn flush_with_info(&mut self, info: &FlushInfo) -> SemaphoresSubmitted {
        self.flush(info)
    }

    /// Flushes pending work to the backend.
    ///
    /// `info` defaults to [`FlushInfo::default()`] when `None` is passed.
    pub fn flush<'a>(&mut self, info: impl Into<Option<&'a FlushInfo>>) -> SemaphoresSubmitted {
        let n = self.native_mut();
        if let Some(info) = info.into() {
            unsafe { n.flush(info.native()) }
        } else {
            // Default flush info lives only for the duration of the call.
            let info = FlushInfo::default();
            unsafe { n.flush(info.native()) }
        }
    }

    /// Flushes pending work targeting `image` with the given flush options.
    pub fn flush_image_with_info(
        &mut self,
        image: &Image,
        info: &FlushInfo,
    ) -> SemaphoresSubmitted {
        unsafe {
            sb::C_GrDirectContext_flushImageWithInfo(
                self.native_mut(),
                // The native side takes ownership of a reference, so hand over
                // a cloned (ref-count incremented) pointer.
                image.clone().into_ptr(),
                info.native(),
            )
        }
    }

    /// Flushes pending work targeting `image` with default options.
    pub fn flush_image(&mut self, image: &Image) {
        // `clone().into_ptr()` transfers one reference to the native side.
        unsafe { sb::C_GrDirectContext_flushImage(self.native_mut(), image.clone().into_ptr()) }
    }

    /// Flushes and submits pending work targeting `image`.
    pub fn flush_and_submit_image(&mut self, image: &Image) {
        unsafe {
            // `clone().into_ptr()` transfers one reference to the native side.
            sb::C_GrDirectContext_flushAndSubmitImage(self.native_mut(), image.clone().into_ptr())
        }
    }

    /// Flushes pending work for `surface`, describing how the backend surface
    /// will be accessed afterwards (`flush3` is the bindgen-renamed overload
    /// taking surface + access + info).
    pub fn flush_surface_with_access(
        &mut self,
        surface: &mut Surface,
        access: surfaces::BackendSurfaceAccess,
        info: &FlushInfo,
    ) -> SemaphoresSubmitted {
        unsafe {
            self.native_mut()
                .flush3(surface.native_mut(), access, info.native())
        }
    }

    /// Flushes pending work for `surface`, optionally transitioning the
    /// backing texture to `new_state` (`flush4` is the bindgen-renamed
    /// overload taking surface + info + state).
    pub fn flush_surface_with_texture_state(
        &mut self,
        surface: &mut Surface,
        info: &FlushInfo,
        new_state: Option<&MutableTextureState>,
    ) -> SemaphoresSubmitted {
        unsafe {
            self.native_mut().flush4(
                surface.native_mut(),
                info.native(),
                new_state.native_ptr_or_null(),
            )
        }
    }

    /// Flushes and submits pending work for `surface`.
    ///
    /// `sync_cpu` defaults to [`SyncCpu::No`].
    pub fn flush_and_submit_surface(
        &mut self,
        surface: &mut Surface,
        sync_cpu: impl Into<Option<SyncCpu>>,
    ) {
        unsafe {
            self.native_mut()
                .flushAndSubmit1(surface.native_mut(), sync_cpu.into().unwrap_or(SyncCpu::No))
        }
    }

    /// Flushes pending work for `surface` with default options (`flush5` is
    /// the bindgen-renamed surface-only overload).
    pub fn flush_surface(&mut self, surface: &mut Surface) {
        unsafe { self.native_mut().flush5(surface.native_mut()) }
    }

    /// Submits previously flushed work to the GPU. Returns the native
    /// `submit()` result.
    pub fn submit(&mut self, submit_info: impl Into<SubmitInfo>) -> bool {
        unsafe { self.native_mut().submit(&submit_info.into().into_native()) }
    }

    /// Checks whether asynchronous GPU work has completed, driving any
    /// pending completion callbacks (native `checkAsyncWorkCompletion()`).
    pub fn check_async_work_completion(&mut self) {
        unsafe { self.native_mut().checkAsyncWorkCompletion() }
    }

    // TODO: dumpMemoryStatistics()

    /// Returns whether the context supports distance-field text rendering.
    pub fn supports_distance_field_text(&self) -> bool {
        unsafe { self.native().supportsDistanceFieldText() }
    }
}
340
/// Vulkan-specific pipeline-cache functionality.
#[cfg(feature = "vulkan")]
impl DirectContext {
    /// Forwards to the native `canDetectNewVkPipelineCacheData()`.
    pub fn can_detect_new_vk_pipeline_cache_data(&self) -> bool {
        unsafe { self.native().canDetectNewVkPipelineCacheData() }
    }

    /// Forwards to the native `hasNewVkPipelineCacheData()`.
    pub fn has_new_vk_pipeline_cache_data(&self) -> bool {
        unsafe { self.native().hasNewVkPipelineCacheData() }
    }

    /// Stores the Vulkan pipeline cache data (native
    /// `storeVkPipelineCacheData()`).
    pub fn store_vk_pipeline_cache_data(&mut self) -> &mut Self {
        unsafe {
            self.native_mut().storeVkPipelineCacheData();
        }
        self
    }

    /// Stores the Vulkan pipeline cache data with an upper size bound
    /// (`storeVkPipelineCacheData1` is the bindgen-renamed overload).
    pub fn store_vk_pipeline_cache_data_with_max_size(&mut self, max_size: usize) -> &mut Self {
        unsafe {
            self.native_mut().storeVkPipelineCacheData1(max_size);
        }
        self
    }
}
365
/// Backend texture / render-target management.
impl DirectContext {
    // TODO: wrap createBackendTexture (several variants)
    //       introduced in m76, m77, and m79
    //       extended in m84 with finishedProc and finishedContext
    //       extended in m107 with label

    // TODO: wrap updateBackendTexture (several variants)
    //       introduced in m84

    /// Returns the [`BackendFormat`] the context uses for the given texture
    /// compression type. The result may be invalid; the native call fills an
    /// initially-invalid format in place.
    pub fn compressed_backend_format(&self, compression: TextureCompressionType) -> BackendFormat {
        let mut backend_format = BackendFormat::new_invalid();
        unsafe {
            sb::C_GrDirectContext_compressedBackendFormat(
                self.native(),
                compression,
                backend_format.native_mut(),
            )
        };
        backend_format
    }

    // TODO: wrap createCompressedBackendTexture (several variants)
    //       introduced in m81
    //       extended in m84 with finishedProc and finishedContext

    // TODO: wrap updateCompressedBackendTexture (two variants)
    //       introduced in m86

    // TODO: add variant with GpuFinishedProc / GpuFinishedContext
    /// Applies `state` to `backend_texture`, returning whether the native
    /// call succeeded. Discards the previous state; use
    /// [`Self::set_backend_texture_state_and_return_previous`] to obtain it.
    pub fn set_backend_texture_state(
        &mut self,
        backend_texture: &BackendTexture,
        state: &MutableTextureState,
    ) -> bool {
        self.set_backend_texture_state_and_return_previous(backend_texture, state)
            .is_some()
    }

    /// Applies `state` to `backend_texture` and returns the texture's
    /// previous state, or `None` if the native call failed.
    pub fn set_backend_texture_state_and_return_previous(
        &mut self,
        backend_texture: &BackendTexture,
        state: &MutableTextureState,
    ) -> Option<MutableTextureState> {
        let mut previous = MutableTextureState::default();
        unsafe {
            self.native_mut().setBackendTextureState(
                backend_texture.native(),
                state.native(),
                previous.native_mut(),
                // No finished-proc callback / context (see TODO above).
                None,
                ptr::null_mut(),
            )
        }
        .then_some(previous)
    }

    // TODO: add variant with GpuFinishedProc / GpuFinishedContext
    /// Applies `state` to `target`, returning whether the native call
    /// succeeded. Discards the previous state; use
    /// [`Self::set_backend_render_target_state_and_return_previous`] to
    /// obtain it.
    pub fn set_backend_render_target_state(
        &mut self,
        target: &BackendRenderTarget,
        state: &MutableTextureState,
    ) -> bool {
        self.set_backend_render_target_state_and_return_previous(target, state)
            .is_some()
    }

    /// Applies `state` to `target` and returns the render target's previous
    /// state, or `None` if the native call failed.
    pub fn set_backend_render_target_state_and_return_previous(
        &mut self,
        target: &BackendRenderTarget,
        state: &MutableTextureState,
    ) -> Option<MutableTextureState> {
        let mut previous = MutableTextureState::default();
        unsafe {
            self.native_mut().setBackendRenderTargetState(
                target.native(),
                state.native(),
                previous.native_mut(),
                // No finished-proc callback / context (see TODO above).
                None,
                ptr::null_mut(),
            )
        }
        .then_some(previous)
    }

    /// Deletes the given backend texture (native `deleteBackendTexture()`).
    pub fn delete_backend_texture(&mut self, texture: &BackendTexture) {
        unsafe { self.native_mut().deleteBackendTexture(texture.native()) }
    }

    /// Precompiles a shader from a `(key, data)` pair; returns the native
    /// `precompileShader()` result.
    pub fn precompile_shader(&mut self, key: &Data, data: &Data) -> bool {
        unsafe {
            self.native_mut()
                .precompileShader(key.native(), data.native())
        }
    }

    /// Returns this context's unique [`DirectContextId`]; the native call
    /// fills the id in place.
    pub fn id(&self) -> DirectContextId {
        let mut id = DirectContextId { id: 0 };
        unsafe { sb::C_GrDirectContext_directContextId(self.native(), id.native_mut()) }
        id
    }
}
467
#[cfg(test)]
mod tests {
    use super::DirectContext;
    use crate::gpu::{SubmitInfo, SyncCpu};

    /// Compile-time check only (never invoked, hence `#[allow(unused)]`):
    /// ensures `DirectContext::submit`'s `impl Into<SubmitInfo>` parameter
    /// accepts all four argument forms below.
    #[allow(unused)]
    fn submit_invocation(direct_context: &mut DirectContext) {
        direct_context.submit(SyncCpu::Yes);
        direct_context.submit(None);
        direct_context.submit(Some(SyncCpu::Yes));
        direct_context.submit(SubmitInfo::default());
    }
}
480}