Subject: Re: [PATCH RFC 06/18] rust: drm: gem: shmem: Add DRM shmem helper abstraction
Date: Wed, 8 Mar 2023 10:38:33 -0300
From: Maíra Canal
To: Asahi Lina, Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, David Airlie, Daniel Vetter, Miguel Ojeda, Alex Gaynor, Wedson Almeida Filho, Boqun Feng, Gary Guo, Björn Roy Baron, Sumit Semwal, Christian König, Luben Tuikov, Jarkko Sakkinen, Dave Hansen
Cc: linaro-mm-sig@lists.linaro.org, rust-for-linux@vger.kernel.org, Karol Herbst, asahi@lists.linux.dev, linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org, Mary, Alyssa Rosenzweig, linux-sgx@vger.kernel.org, Ella Stanforth, Faith Ekstrand, linux-media@vger.kernel.org
References: <20230307-rust-drm-v1-0-917ff5bc80a8@asahilina.net> <20230307-rust-drm-v1-6-917ff5bc80a8@asahilina.net>
In-Reply-To: <20230307-rust-drm-v1-6-917ff5bc80a8@asahilina.net>

On 3/7/23 11:25, Asahi Lina wrote:
> The DRM shmem helper includes common code useful for drivers which
> allocate GEM objects as anonymous shmem. Add a Rust abstraction for
> this. Drivers can choose the raw GEM implementation or the shmem layer,
> depending on their needs.
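
As a side note for anyone else reading along: if I follow the series
correctly, a driver opts into this layer by implementing the
`DriverObject` trait and then gets `Object<T>` allocation and the shmem
vtable for free. Roughly like this (a minimal sketch; `MyDriver` and
`MyObjectData` are hypothetical names, and the exact trait plumbing may
differ from the patch):

    // Hypothetical per-object driver state; `MyDriver` stands in for the
    // driver's `drv::Driver` implementation.
    struct MyObjectData {
        id: u32,
    }

    impl shmem::DriverObject for MyObjectData {
        type Driver = MyDriver;

        // Called back from gem_create_object() below to construct the
        // driver-private part of each new shmem-backed object.
        fn new(_dev: &device::Device<MyDriver>, _size: usize) -> Result<Self> {
            Ok(MyObjectData { id: 0 })
        }
    }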
>
> Signed-off-by: Asahi Lina
> ---
>  drivers/gpu/drm/Kconfig         |   5 +
>  rust/bindings/bindings_helper.h |   2 +
>  rust/helpers.c                  |  67 +++++++
>  rust/kernel/drm/gem/mod.rs      |   3 +
>  rust/kernel/drm/gem/shmem.rs    | 381 ++++++++++++++++++++++++++++++++++++++++
>  5 files changed, 458 insertions(+)
>

[...]

> +unsafe extern "C" fn gem_create_object<T: DriverObject>(
> +    raw_dev: *mut bindings::drm_device,
> +    size: usize,
> +) -> *mut bindings::drm_gem_object {
> +    // SAFETY: GEM ensures the device lives as long as its objects live,
> +    // so we can conjure up a reference from thin air and never drop it.
> +    let dev = ManuallyDrop::new(unsafe { device::Device::from_raw(raw_dev) });
> +
> +    let inner = match T::new(&*dev, size) {
> +        Ok(v) => v,
> +        Err(e) => return e.to_ptr(),
> +    };
> +
> +    let p = unsafe {
> +        bindings::krealloc(
> +            core::ptr::null(),
> +            Object::<T>::SIZE,
> +            bindings::GFP_KERNEL | bindings::__GFP_ZERO,
> +        ) as *mut Object<T>
> +    };
> +
> +    if p.is_null() {
> +        return ENOMEM.to_ptr();
> +    }
> +
> +    // SAFETY: p is valid as long as the alloc succeeded
> +    unsafe {
> +        addr_of_mut!((*p).dev).write(dev);
> +        addr_of_mut!((*p).inner).write(inner);
> +    }
> +
> +    // SAFETY: drm_gem_shmem_object is safe to zero-init, and
> +    // the rest of Object<T> has been initialized
> +    let new: &mut Object<T> = unsafe { &mut *(p as *mut _) };
> +
> +    new.obj.base.funcs = &Object::<T>::VTABLE;
> +    &mut new.obj.base
> +}

It would be nice to allow setting wc inside the gem_create_object
callback, as several drivers (v3d, vc4, panfrost, lima, ...) do.
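
To sketch what I mean (purely illustrative, not part of this patch; it
assumes the bindgen-generated `drm_gem_shmem_object` exposes the
`map_wc` field and that a trait item like this is acceptable):

    // Hypothetical trait item: whether new objects of this type should
    // be mapped write-combine.
    const MAP_WC: bool = false;

    // ...which gem_create_object() would then apply once the object is
    // initialized, before handing it back to the DRM core:
    new.obj.map_wc = T::MAP_WC;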

Best Regards,
- Maíra Canal

> +
> +unsafe extern "C" fn free_callback<T: DriverObject>(obj: *mut bindings::drm_gem_object) {
> +    // SAFETY: All of our objects are Object<T>.
> +    let p = crate::container_of!(obj, Object<T>, obj) as *mut Object<T>;
> +
> +    // SAFETY: p is never used after this
> +    unsafe {
> +        core::ptr::drop_in_place(&mut (*p).inner);
> +    }
> +
> +    // SAFETY: This pointer has to be valid, since p is valid
> +    unsafe {
> +        bindings::drm_gem_shmem_free(&mut (*p).obj);
> +    }
> +}
> +
> +impl<T: DriverObject> Object<T> {
> +    /// The size of this object's structure.
> +    const SIZE: usize = mem::size_of::<Self>();
> +
> +    /// `drm_gem_object_funcs` vtable suitable for GEM shmem objects.
> +    const VTABLE: bindings::drm_gem_object_funcs = bindings::drm_gem_object_funcs {
> +        free: Some(free_callback::<T>),
> +        open: Some(super::open_callback::<T, Object<T>>),
> +        close: Some(super::close_callback::<T, Object<T>>),
> +        print_info: Some(bindings::drm_gem_shmem_object_print_info),
> +        export: None,
> +        pin: Some(bindings::drm_gem_shmem_object_pin),
> +        unpin: Some(bindings::drm_gem_shmem_object_unpin),
> +        get_sg_table: Some(bindings::drm_gem_shmem_object_get_sg_table),
> +        vmap: Some(bindings::drm_gem_shmem_object_vmap),
> +        vunmap: Some(bindings::drm_gem_shmem_object_vunmap),
> +        mmap: Some(bindings::drm_gem_shmem_object_mmap),
> +        vm_ops: &SHMEM_VM_OPS,
> +    };
> +
> +    // SAFETY: Must only be used with DRM functions that are thread-safe
> +    unsafe fn mut_shmem(&self) -> *mut bindings::drm_gem_shmem_object {
> +        &self.obj as *const _ as *mut _
> +    }
> +
> +    /// Create a new shmem-backed DRM object of the given size.
> +    pub fn new(dev: &device::Device<T::Driver>, size: usize) -> Result<gem::UniqueObjectRef<Self>> {
> +        // SAFETY: This function can be called as long as the ALLOC_OPS are set properly
> +        // for this driver, and the gem_create_object is called.
> +        let p = unsafe { bindings::drm_gem_shmem_create(dev.raw() as *mut _, size) };
> +        let p = crate::container_of!(p, Object<T>, obj) as *mut _;
> +
> +        // SAFETY: The gem_create_object callback ensures this is a valid Object<T>,
> +        // so we can take a unique reference to it.
> +        let obj_ref = gem::UniqueObjectRef { ptr: p };
> +
> +        Ok(obj_ref)
> +    }
> +
> +    /// Returns the `Device` that owns this GEM object.
> +    pub fn dev(&self) -> &device::Device<T::Driver> {
> +        &self.dev
> +    }
> +
> +    /// Creates (if necessary) and returns a scatter-gather table of DMA pages for this object.
> +    ///
> +    /// This will pin the object in memory.
> +    pub fn sg_table(&self) -> Result<SGTable<T>> {
> +        // SAFETY: drm_gem_shmem_get_pages_sgt is thread-safe.
> +        let sgt = from_kernel_err_ptr(unsafe {
> +            bindings::drm_gem_shmem_get_pages_sgt(self.mut_shmem())
> +        })?;
> +
> +        Ok(SGTable {
> +            sgt,
> +            _owner: self.reference(),
> +        })
> +    }
> +
> +    /// Creates and returns a virtual kernel memory mapping for this object.
> +    pub fn vmap(&self) -> Result<VMap<T>> {
> +        let mut map: MaybeUninit<bindings::iosys_map> = MaybeUninit::uninit();
> +
> +        // SAFETY: drm_gem_shmem_vmap is thread-safe
> +        to_result(unsafe { bindings::drm_gem_shmem_vmap(self.mut_shmem(), map.as_mut_ptr()) })?;
> +
> +        // SAFETY: if drm_gem_shmem_vmap did not fail, map is initialized now
> +        let map = unsafe { map.assume_init() };
> +
> +        Ok(VMap {
> +            map,
> +            owner: self.reference(),
> +        })
> +    }
> +
> +    /// Set the write-combine flag for this object.
> +    ///
> +    /// Should be called before any mappings are made.
> +    pub fn set_wc(&mut self, map_wc: bool) {
> +        unsafe { (*self.mut_shmem()).map_wc = map_wc };
> +    }
> +}
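
As a quick sanity check on the API surface, driver-side usage would
presumably look something like this (hypothetical; it assumes
`gem::UniqueObjectRef` derefs mutably to `Object<T>` so that `set_wc()`
is reachable):

    // Allocate, opt into write-combine before the first mapping is
    // created, then zero the buffer through a kernel vmap.
    let mut obj = shmem::Object::<MyObjectData>::new(dev, size)?;
    obj.set_wc(true);
    let mut vmap = obj.vmap()?;
    vmap.as_mut_slice().fill(0);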
> +
> +impl<T: DriverObject> Deref for Object<T> {
> +    type Target = T;
> +
> +    fn deref(&self) -> &Self::Target {
> +        &self.inner
> +    }
> +}
> +
> +impl<T: DriverObject> DerefMut for Object<T> {
> +    fn deref_mut(&mut self) -> &mut Self::Target {
> +        &mut self.inner
> +    }
> +}
> +
> +impl<T: DriverObject> crate::private::Sealed for Object<T> {}
> +
> +impl<T: DriverObject> gem::IntoGEMObject for Object<T> {
> +    type Driver = T::Driver;
> +
> +    fn gem_obj(&self) -> *mut bindings::drm_gem_object {
> +        &self.obj.base as *const _ as *mut _
> +    }
> +
> +    fn from_gem_obj(obj: *mut bindings::drm_gem_object) -> *mut Object<T> {
> +        crate::container_of!(obj, Object<T>, obj) as *mut Object<T>
> +    }
> +}
> +
> +impl<T: DriverObject> drv::AllocImpl for Object<T> {
> +    const ALLOC_OPS: drv::AllocOps = drv::AllocOps {
> +        gem_create_object: Some(gem_create_object::<T>),
> +        prime_handle_to_fd: Some(bindings::drm_gem_prime_handle_to_fd),
> +        prime_fd_to_handle: Some(bindings::drm_gem_prime_fd_to_handle),
> +        gem_prime_import: None,
> +        gem_prime_import_sg_table: Some(bindings::drm_gem_shmem_prime_import_sg_table),
> +        gem_prime_mmap: Some(bindings::drm_gem_prime_mmap),
> +        dumb_create: Some(bindings::drm_gem_shmem_dumb_create),
> +        dumb_map_offset: None,
> +        dumb_destroy: None,
> +    };
> +}
> +
> +/// A virtual mapping for a shmem-backed GEM object in kernel address space.
> +pub struct VMap<T: DriverObject> {
> +    map: bindings::iosys_map,
> +    owner: gem::ObjectRef<Object<T>>,
> +}
> +
> +impl<T: DriverObject> VMap<T> {
> +    /// Returns a const raw pointer to the start of the mapping.
> +    pub fn as_ptr(&self) -> *const core::ffi::c_void {
> +        // SAFETY: The shmem helpers always return non-iomem maps
> +        unsafe { self.map.__bindgen_anon_1.vaddr }
> +    }
> +
> +    /// Returns a mutable raw pointer to the start of the mapping.
> +    pub fn as_mut_ptr(&mut self) -> *mut core::ffi::c_void {
> +        // SAFETY: The shmem helpers always return non-iomem maps
> +        unsafe { self.map.__bindgen_anon_1.vaddr }
> +    }
> +
> +    /// Returns a byte slice view of the mapping.
> +    pub fn as_slice(&self) -> &[u8] {
> +        // SAFETY: The vmap maps valid memory up to the owner size
> +        unsafe { slice::from_raw_parts(self.as_ptr() as *const u8, self.owner.size()) }
> +    }
> +
> +    /// Returns a mutable byte slice view of the mapping.
> +    pub fn as_mut_slice(&mut self) -> &mut [u8] {
> +        // SAFETY: The vmap maps valid memory up to the owner size
> +        unsafe { slice::from_raw_parts_mut(self.as_mut_ptr() as *mut u8, self.owner.size()) }
> +    }
> +
> +    /// Borrows a reference to the object that owns this virtual mapping.
> +    pub fn owner(&self) -> &gem::ObjectRef<Object<T>> {
> +        &self.owner
> +    }
> +}
> +
> +impl<T: DriverObject> Drop for VMap<T> {
> +    fn drop(&mut self) {
> +        // SAFETY: This function is thread-safe
> +        unsafe {
> +            bindings::drm_gem_shmem_vunmap(self.owner.mut_shmem(), &mut self.map);
> +        }
> +    }
> +}
> +
> +/// SAFETY: `iosys_map` objects are safe to send across threads.
> +unsafe impl<T: DriverObject> Send for VMap<T> {}
> +unsafe impl<T: DriverObject> Sync for VMap<T> {}
> +
> +/// A single scatter-gather entry, representing a span of pages in the device's DMA address space.
> +///
> +/// For devices not behind a standalone IOMMU, this corresponds to physical addresses.
> +#[repr(transparent)]
> +pub struct SGEntry(bindings::scatterlist);
> +
> +impl SGEntry {
> +    /// Returns the starting DMA address of this span
> +    pub fn dma_address(&self) -> usize {
> +        (unsafe { bindings::sg_dma_address(&self.0) }) as usize
> +    }
> +
> +    /// Returns the length of this span in bytes
> +    pub fn dma_len(&self) -> usize {
> +        (unsafe { bindings::sg_dma_len(&self.0) }) as usize
> +    }
> +}
> +
> +/// A scatter-gather table of DMA address spans for a GEM shmem object.
> +///
> +/// # Invariants
> +/// `sgt` must be a valid pointer to the `sg_table`, which must correspond to the owned
> +/// object in `_owner` (which ensures it remains valid).
> +pub struct SGTable<T: DriverObject> {
> +    sgt: *const bindings::sg_table,
> +    _owner: gem::ObjectRef<Object<T>>,
> +}
> +
> +impl<T: DriverObject> SGTable<T> {
> +    /// Returns an iterator through the SGTable's entries
> +    pub fn iter(&'_ self) -> SGTableIter<'_> {
> +        SGTableIter {
> +            left: unsafe { (*self.sgt).nents } as usize,
> +            sg: unsafe { (*self.sgt).sgl },
> +            _p: PhantomData,
> +        }
> +    }
> +}
> +
> +impl<'a, T: DriverObject> IntoIterator for &'a SGTable<T> {
> +    type Item = &'a SGEntry;
> +    type IntoIter = SGTableIter<'a>;
> +
> +    fn into_iter(self) -> Self::IntoIter {
> +        self.iter()
> +    }
> +}
> +
> +/// SAFETY: `sg_table` objects are safe to send across threads.
> +unsafe impl<T: DriverObject> Send for SGTable<T> {}
> +unsafe impl<T: DriverObject> Sync for SGTable<T> {}
> +
> +/// An iterator through `SGTable` entries.
> +///
> +/// # Invariants
> +/// `sg` must be a valid pointer to the scatterlist, which must outlive our lifetime.
> +pub struct SGTableIter<'a> {
> +    sg: *mut bindings::scatterlist,
> +    left: usize,
> +    _p: PhantomData<&'a ()>,
> +}
> +
> +impl<'a> Iterator for SGTableIter<'a> {
> +    type Item = &'a SGEntry;
> +
> +    fn next(&mut self) -> Option<Self::Item> {
> +        if self.left == 0 {
> +            None
> +        } else {
> +            let sg = self.sg;
> +            self.sg = unsafe { bindings::sg_next(self.sg) };
> +            self.left -= 1;
> +            Some(unsafe { &(*(sg as *const SGEntry)) })
> +        }
> +    }
> +}
>
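
FWIW, the `SGTable` iterator reads nicely from the driver side;
something like this (hypothetical usage of the API above):

    // Pin the object's pages and walk the resulting DMA spans.
    let sgt = obj.sg_table()?;
    for entry in &sgt {
        pr_info!(
            "DMA span: addr={:#x} len={:#x}\n",
            entry.dma_address(),
            entry.dma_len()
        );
    }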