92 changes: 92 additions & 0 deletions drivers/android/defs.rs
@@ -0,0 +1,92 @@
// SPDX-License-Identifier: GPL-2.0

use core::ops::{Deref, DerefMut};
use kernel::{
bindings,
bindings::*,
user_ptr::{ReadableFromBytes, WritableToBytes},
};

macro_rules! pub_no_prefix {
($prefix:ident, $($newname:ident),+) => {
$(pub const $newname: u32 = concat_idents!($prefix, $newname);)+
};
}

pub_no_prefix!(
binder_driver_return_protocol_,
BR_OK,
BR_ERROR,
BR_TRANSACTION,
BR_REPLY,
BR_DEAD_REPLY,
BR_TRANSACTION_COMPLETE,
BR_INCREFS,
BR_ACQUIRE,
BR_RELEASE,
BR_DECREFS,
BR_NOOP,
BR_SPAWN_LOOPER,
BR_DEAD_BINDER,
BR_CLEAR_DEATH_NOTIFICATION_DONE,
BR_FAILED_REPLY
);

pub_no_prefix!(
binder_driver_command_protocol_,
BC_TRANSACTION,
BC_REPLY,
BC_FREE_BUFFER,
BC_INCREFS,
BC_ACQUIRE,
BC_RELEASE,
BC_DECREFS,
BC_INCREFS_DONE,
BC_ACQUIRE_DONE,
BC_REGISTER_LOOPER,
BC_ENTER_LOOPER,
BC_EXIT_LOOPER,
BC_REQUEST_DEATH_NOTIFICATION,
BC_CLEAR_DEATH_NOTIFICATION,
BC_DEAD_BINDER_DONE
);
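
// For reference, each invocation above effectively expands to one `pub const`
// per name, e.g. (sketch of the generated item for `BR_OK`):
//
//     pub const BR_OK: u32 = binder_driver_return_protocol_BR_OK;
//
// where the right-hand identifier comes from the `bindings::*` glob import.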

macro_rules! decl_wrapper {
($newname:ident, $wrapped:ty) => {
#[derive(Copy, Clone, Default)]
pub(crate) struct $newname($wrapped);

// TODO: This must be justified by inspecting the type, so it should either live outside
// the macro or the macro should somehow be marked unsafe.
unsafe impl ReadableFromBytes for $newname {}
unsafe impl WritableToBytes for $newname {}

impl Deref for $newname {
type Target = $wrapped;
fn deref(&self) -> &Self::Target {
&self.0
}
}

impl DerefMut for $newname {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
};
}

decl_wrapper!(BinderNodeDebugInfo, bindings::binder_node_debug_info);
decl_wrapper!(BinderNodeInfoForRef, bindings::binder_node_info_for_ref);
decl_wrapper!(FlatBinderObject, bindings::flat_binder_object);
decl_wrapper!(BinderTransactionData, bindings::binder_transaction_data);
decl_wrapper!(BinderWriteRead, bindings::binder_write_read);
decl_wrapper!(BinderVersion, bindings::binder_version);

impl BinderVersion {
pub(crate) fn current() -> Self {
Self(bindings::binder_version {
protocol_version: bindings::BINDER_CURRENT_PROTOCOL_VERSION as _,
})
}
}
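
// --- Illustrative sketch (not part of the patch) ---
// Because the wrappers implement `WritableToBytes`, they can be copied to
// userspace byte-for-byte. A hypothetical helper, assuming
// `kernel::user_ptr::UserSlicePtrWriter` and `kernel::prelude::*` are in scope:
//
// fn write_version(writer: &mut UserSlicePtrWriter) -> KernelResult {
//     writer.write(&BinderVersion::current())
// }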
180 changes: 180 additions & 0 deletions drivers/android/linked_list.rs
@@ -0,0 +1,180 @@
// SPDX-License-Identifier: GPL-2.0

use alloc::{boxed::Box, sync::Arc};
use core::ptr::NonNull;

pub use crate::raw_list::{Cursor, GetLinks, Links};
use crate::{raw_list, raw_list::RawList};

// TODO: Use the one from `kernel::file_operations::PointerWrapper` instead.
pub trait Wrapper<T: ?Sized> {
fn into_pointer(self) -> NonNull<T>;
unsafe fn from_pointer(ptr: NonNull<T>) -> Self;
fn as_ref(&self) -> &T;
}

impl<T: ?Sized> Wrapper<T> for Box<T> {
fn into_pointer(self) -> NonNull<T> {
NonNull::new(Box::into_raw(self)).unwrap()
}

unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
Box::from_raw(ptr.as_ptr())
}

fn as_ref(&self) -> &T {
AsRef::as_ref(self)
}
}

impl<T: ?Sized> Wrapper<T> for Arc<T> {
fn into_pointer(self) -> NonNull<T> {
NonNull::new(Arc::into_raw(self) as _).unwrap()
}

unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
Arc::from_raw(ptr.as_ptr())
}

fn as_ref(&self) -> &T {
AsRef::as_ref(self)
}
}

impl<T: ?Sized> Wrapper<T> for &T {
fn into_pointer(self) -> NonNull<T> {
NonNull::from(self)
}

unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
&*ptr.as_ptr()
}

fn as_ref(&self) -> &T {
self
}
}

pub trait GetLinksWrapped: GetLinks {
type Wrapped: Wrapper<Self::EntryType>;
}

impl<T: ?Sized> GetLinksWrapped for Box<T>
where
Box<T>: GetLinks,
{
type Wrapped = Box<<Box<T> as GetLinks>::EntryType>;
}

impl<T: GetLinks + ?Sized> GetLinks for Box<T> {
type EntryType = T::EntryType;
fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
<T as GetLinks>::get_links(data)
}
}

impl<T: ?Sized> GetLinksWrapped for Arc<T>
where
Arc<T>: GetLinks,
{
type Wrapped = Arc<<Arc<T> as GetLinks>::EntryType>;
}

impl<T: GetLinks + ?Sized> GetLinks for Arc<T> {
type EntryType = T::EntryType;
fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
<T as GetLinks>::get_links(data)
}
}

pub struct List<G: GetLinksWrapped> {
list: RawList<G>,
}

impl<G: GetLinksWrapped> List<G> {
pub fn new() -> Self {
Self {
list: RawList::new(),
}
}

pub fn is_empty(&self) -> bool {
self.list.is_empty()
}

pub fn push_back(&mut self, data: G::Wrapped) {
let ptr = data.into_pointer();
if !unsafe { self.list.push_back(ptr.as_ref()) } {
// If insertion failed, rebuild the object so that it can be freed.
unsafe { G::Wrapped::from_pointer(ptr) };
}
}

pub unsafe fn insert_after(&mut self, existing: NonNull<G::EntryType>, data: G::Wrapped) {
let ptr = data.into_pointer();
let entry = &*existing.as_ptr();
if !self.list.insert_after(entry, ptr.as_ref()) {
// If insertion failed, rebuild the object so that it can be freed.
G::Wrapped::from_pointer(ptr);
}
}

pub unsafe fn remove(&mut self, data: &G::Wrapped) -> Option<G::Wrapped> {
let entry_ref = Wrapper::as_ref(data);
if self.list.remove(entry_ref) {
Some(G::Wrapped::from_pointer(NonNull::from(entry_ref)))
} else {
None
}
}

pub fn pop_front(&mut self) -> Option<G::Wrapped> {
let front = self.list.pop_front()?;
Some(unsafe { G::Wrapped::from_pointer(front) })
}

pub fn cursor_front(&self) -> Cursor<'_, G> {
self.list.cursor_front()
}

pub fn cursor_front_mut(&mut self) -> CursorMut<'_, G> {
CursorMut::new(self.list.cursor_front_mut())
}
}

impl<G: GetLinksWrapped> Drop for List<G> {
fn drop(&mut self) {
while self.pop_front().is_some() {}
}
}

pub struct CursorMut<'a, G: GetLinksWrapped> {
cursor: raw_list::CursorMut<'a, G>,
}

impl<'a, G: GetLinksWrapped> CursorMut<'a, G> {
fn new(cursor: raw_list::CursorMut<'a, G>) -> Self {
Self { cursor }
}

pub fn current(&mut self) -> Option<&mut G::EntryType> {
self.cursor.current()
}

pub fn remove_current(&mut self) -> Option<G::Wrapped> {
let ptr = self.cursor.remove_current()?;
Some(unsafe { G::Wrapped::from_pointer(ptr) })
}

pub fn peek_next(&mut self) -> Option<&mut G::EntryType> {
self.cursor.peek_next()
}

pub fn peek_prev(&mut self) -> Option<&mut G::EntryType> {
self.cursor.peek_prev()
}

pub fn move_next(&mut self) {
self.cursor.move_next();
}
}
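
// --- Illustrative sketch (not part of the patch) ---
// How `List` is meant to be used: the entry type embeds its own `Links` and
// implements `GetLinks` so the list knows where they live; wrapping entries in
// `Box` transfers ownership of the heap allocation to the list. Assumes
// `kernel::prelude::*` is in scope.
//
// struct Item {
//     value: u32,
//     links: Links<Item>,
// }
//
// impl GetLinks for Item {
//     type EntryType = Item;
//     fn get_links(item: &Item) -> &Links<Item> {
//         &item.links
//     }
// }
//
// fn list_example() -> KernelResult {
//     let mut list = List::<Box<Item>>::new();
//     list.push_back(Box::try_new(Item { value: 1, links: Links::new() })?);
//     // `pop_front` hands ownership back as a `Box<Item>`; dropping it frees it.
//     while let Some(item) = list.pop_front() {
//         let _ = item.value;
//     }
//     Ok(())
// }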
479 changes: 479 additions & 0 deletions drivers/android/node.rs

Large diffs are not rendered by default.

949 changes: 949 additions & 0 deletions drivers/android/process.rs

Large diffs are not rendered by default.

189 changes: 189 additions & 0 deletions drivers/android/range_alloc.rs
@@ -0,0 +1,189 @@
// SPDX-License-Identifier: GPL-2.0

use alloc::boxed::Box;
use core::ptr::NonNull;
use kernel::{prelude::*, Error};

use crate::linked_list::{CursorMut, GetLinks, Links, List};

pub(crate) struct RangeAllocator<T> {
list: List<Box<Descriptor<T>>>,
}

#[derive(Debug, PartialEq, Eq)]
enum DescriptorState {
Free,
Reserved,
Allocated,
}

impl<T> RangeAllocator<T> {
pub(crate) fn new(size: usize) -> KernelResult<Self> {
let desc = Box::try_new(Descriptor::new(0, size))?;
let mut list = List::new();
list.push_back(desc);
Ok(Self { list })
}

fn find_best_match(&self, size: usize) -> Option<NonNull<Descriptor<T>>> {
// TODO: Use a binary tree instead of list for this lookup.
let mut best = None;
let mut best_size = usize::MAX;
let mut cursor = self.list.cursor_front();
while let Some(desc) = cursor.current() {
if desc.state == DescriptorState::Free {
if size == desc.size {
return Some(NonNull::from(desc));
}

if size < desc.size && desc.size < best_size {
best = Some(NonNull::from(desc));
best_size = desc.size;
}
}

cursor.move_next();
}
best
}

pub(crate) fn reserve_new(&mut self, size: usize) -> KernelResult<usize> {
let desc_ptr = match self.find_best_match(size) {
None => return Err(Error::ENOMEM),
Some(found) => found,
};

// SAFETY: We hold the only mutable reference to the list, so it cannot have changed.
let desc = unsafe { &mut *desc_ptr.as_ptr() };
if desc.size == size {
desc.state = DescriptorState::Reserved;
return Ok(desc.offset);
}

// We need to break up the descriptor.
let new = Box::try_new(Descriptor::new(desc.offset + size, desc.size - size))?;
unsafe { self.list.insert_after(desc_ptr, new) };
desc.state = DescriptorState::Reserved;
desc.size = size;
Ok(desc.offset)
}

fn free_with_cursor(cursor: &mut CursorMut<Box<Descriptor<T>>>) -> KernelResult {
let mut size = match cursor.current() {
None => return Err(Error::EINVAL),
Some(ref mut entry) => {
match entry.state {
DescriptorState::Free => return Err(Error::EINVAL),
DescriptorState::Allocated => return Err(Error::EPERM),
DescriptorState::Reserved => {}
}
entry.state = DescriptorState::Free;
entry.size
}
};

// Try to merge with the next entry.
if let Some(next) = cursor.peek_next() {
if next.state == DescriptorState::Free {
next.offset -= size;
next.size += size;
size = next.size;
cursor.remove_current();
}
}

// Try to merge with the previous entry.
if let Some(prev) = cursor.peek_prev() {
if prev.state == DescriptorState::Free {
prev.size += size;
cursor.remove_current();
}
}

Ok(())
}

fn find_at_offset(&mut self, offset: usize) -> Option<CursorMut<Box<Descriptor<T>>>> {
let mut cursor = self.list.cursor_front_mut();
while let Some(desc) = cursor.current() {
if desc.offset == offset {
return Some(cursor);
}

if desc.offset > offset {
return None;
}

cursor.move_next();
}
None
}

pub(crate) fn reservation_abort(&mut self, offset: usize) -> KernelResult {
// TODO: The force case is currently O(n), but could be made O(1) with unsafe.
let mut cursor = self.find_at_offset(offset).ok_or(Error::EINVAL)?;
Self::free_with_cursor(&mut cursor)
}

pub(crate) fn reservation_commit(&mut self, offset: usize, data: Option<T>) -> KernelResult {
// TODO: This is currently O(n), make it O(1).
let mut cursor = self.find_at_offset(offset).ok_or(Error::ENOENT)?;
let desc = cursor.current().unwrap();
desc.state = DescriptorState::Allocated;
desc.data = data;
Ok(())
}

/// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
/// [`DescriptorState::Reserved`].
///
/// Returns the size of the existing entry and the data associated with it.
pub(crate) fn reserve_existing(&mut self, offset: usize) -> KernelResult<(usize, Option<T>)> {
// TODO: This is currently O(n), make it O(log n).
let mut cursor = self.find_at_offset(offset).ok_or(Error::ENOENT)?;
let desc = cursor.current().unwrap();
if desc.state != DescriptorState::Allocated {
return Err(Error::ENOENT);
}
desc.state = DescriptorState::Reserved;
Ok((desc.size, desc.data.take()))
}

pub(crate) fn for_each<F: Fn(usize, usize, Option<T>)>(&mut self, callback: F) {
let mut cursor = self.list.cursor_front_mut();
while let Some(desc) = cursor.current() {
if desc.state == DescriptorState::Allocated {
callback(desc.offset, desc.size, desc.data.take());
}

cursor.move_next();
}
}
}

struct Descriptor<T> {
state: DescriptorState,
size: usize,
offset: usize,
links: Links<Descriptor<T>>,
data: Option<T>,
}

impl<T> Descriptor<T> {
fn new(offset: usize, size: usize) -> Self {
Self {
size,
offset,
state: DescriptorState::Free,
links: Links::new(),
data: None,
}
}
}

impl<T> GetLinks for Descriptor<T> {
type EntryType = Self;
fn get_links(desc: &Self) -> &Links<Self> {
&desc.links
}
}
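
// --- Illustrative sketch (not part of the patch) ---
// The descriptor state machine as exercised by a caller, assuming
// `kernel::prelude::*` is in scope:
//
// fn range_alloc_example() -> KernelResult {
//     let mut alloc = RangeAllocator::<u32>::new(1024)?;
//     let offset = alloc.reserve_new(128)?; // Free -> Reserved
//     alloc.reservation_commit(offset, Some(7))?; // Reserved -> Allocated
//     let (size, data) = alloc.reserve_existing(offset)?; // Allocated -> Reserved
//     assert_eq!(size, 128);
//     assert_eq!(data, Some(7));
//     alloc.reservation_abort(offset) // Reserved -> Free (merges with free neighbours)
// }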
288 changes: 288 additions & 0 deletions drivers/android/raw_list.rs
@@ -0,0 +1,288 @@
// SPDX-License-Identifier: GPL-2.0

use core::{
cell::UnsafeCell,
ptr,
ptr::NonNull,
sync::atomic::{AtomicBool, Ordering},
};

pub trait GetLinks {
type EntryType: ?Sized;
fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType>;
}

pub struct Links<T: ?Sized>(UnsafeCell<ListEntry<T>>);

impl<T: ?Sized> Links<T> {
pub fn new() -> Self {
Self(UnsafeCell::new(ListEntry::new()))
}
}

struct ListEntry<T: ?Sized> {
next: Option<NonNull<T>>,
prev: Option<NonNull<T>>,
inserted: AtomicBool,
}

impl<T: ?Sized> ListEntry<T> {
fn new() -> Self {
Self {
next: None,
prev: None,
inserted: AtomicBool::new(false),
}
}

fn acquire_for_insertion(&mut self) -> bool {
self.inserted
.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
}

fn release_after_removal(&mut self) {
self.inserted.store(false, Ordering::Release);
}
}

pub struct RawList<G: GetLinks> {
head: Option<NonNull<G::EntryType>>,
}

impl<G: GetLinks> RawList<G> {
pub fn new() -> Self {
Self { head: None }
}

pub fn is_empty(&self) -> bool {
self.head.is_none()
}

fn insert_after_priv(
&mut self,
existing: &G::EntryType,
new_entry: &mut ListEntry<G::EntryType>,
new_ptr: Option<NonNull<G::EntryType>>,
) -> bool {
if !new_entry.acquire_for_insertion() {
// Nothing to do if already inserted.
return false;
}

{
let existing_links = unsafe { &mut *G::get_links(existing).0.get() };
new_entry.next = existing_links.next;
existing_links.next = new_ptr;
}

new_entry.prev = Some(NonNull::from(existing));
let next_links = unsafe { &mut *G::get_links(new_entry.next.unwrap().as_ref()).0.get() };
next_links.prev = new_ptr;
true
}

pub fn insert_after(&mut self, existing: &G::EntryType, new: &G::EntryType) -> bool {
let new_entry = unsafe { &mut *G::get_links(new).0.get() };
self.insert_after_priv(existing, new_entry, Some(NonNull::from(new)))
}

fn push_back_internal(&mut self, new: &G::EntryType) -> bool {
let new_entry = unsafe { &mut *G::get_links(new).0.get() };
let new_ptr = Some(NonNull::from(new));
match self.back() {
Some(back) => self.insert_after_priv(unsafe { back.as_ref() }, new_entry, new_ptr),
None => {
if !new_entry.acquire_for_insertion() {
// Nothing to do if already inserted.
return false;
}
self.head = new_ptr;
new_entry.next = new_ptr;
new_entry.prev = new_ptr;
true
}
}
}

pub unsafe fn push_back(&mut self, new: &G::EntryType) -> bool {
self.push_back_internal(new)
}

fn remove_internal(&mut self, data: &G::EntryType) -> bool {
let links = unsafe { &mut *G::get_links(data).0.get() };
let next = if let Some(next) = links.next {
next
} else {
// Nothing to do if the entry is not on the list.
return false;
};

if ptr::eq(data, next.as_ptr()) {
// We're removing the only element.
self.head = None
} else {
// Update the head if we're removing it.
if let Some(raw_head) = self.head {
if ptr::eq(data, raw_head.as_ptr()) {
self.head = Some(next);
}
}

unsafe { &mut *G::get_links(links.prev.unwrap().as_ref()).0.get() }.next = links.next;
unsafe { &mut *G::get_links(next.as_ref()).0.get() }.prev = links.prev;
}

// Reset the links of the element we're removing so that we know it's not on any list.
links.next = None;
links.prev = None;
links.release_after_removal();
true
}

pub unsafe fn remove(&mut self, data: &G::EntryType) -> bool {
self.remove_internal(data)
}

fn pop_front_internal(&mut self) -> Option<NonNull<G::EntryType>> {
let head = self.head?;
unsafe { self.remove(head.as_ref()) };
Some(head)
}

pub fn pop_front(&mut self) -> Option<NonNull<G::EntryType>> {
self.pop_front_internal()
}

pub fn front(&self) -> Option<NonNull<G::EntryType>> {
self.head
}

pub fn back(&self) -> Option<NonNull<G::EntryType>> {
unsafe { &*G::get_links(self.head?.as_ref()).0.get() }.prev
}

pub fn cursor_front(&self) -> Cursor<'_, G> {
Cursor::new(self, self.front())
}

pub fn cursor_front_mut(&mut self) -> CursorMut<'_, G> {
CursorMut::new(self, self.front())
}
}

struct CommonCursor<G: GetLinks> {
cur: Option<NonNull<G::EntryType>>,
}

impl<G: GetLinks> CommonCursor<G> {
fn new(cur: Option<NonNull<G::EntryType>>) -> Self {
Self { cur }
}

fn move_next(&mut self, list: &RawList<G>) {
match self.cur.take() {
None => self.cur = list.head,
Some(cur) => {
if let Some(head) = list.head {
// SAFETY: We have a shared ref to the linked list, so the links can't change.
let links = unsafe { &*G::get_links(cur.as_ref()).0.get() };
if links.next.unwrap() != head {
self.cur = links.next;
}
}
}
}
}

fn move_prev(&mut self, list: &RawList<G>) {
match list.head {
None => self.cur = None,
Some(head) => {
let next = match self.cur.take() {
None => head,
Some(cur) => {
if cur == head {
return;
}
cur
}
};
// SAFETY: There's a shared ref to the list, so the links can't change.
let links = unsafe { &*G::get_links(next.as_ref()).0.get() };
self.cur = links.prev;
}
}
}
}

pub struct Cursor<'a, G: GetLinks> {
cursor: CommonCursor<G>,
list: &'a RawList<G>,
}

impl<'a, G: GetLinks> Cursor<'a, G> {
fn new(list: &'a RawList<G>, cur: Option<NonNull<G::EntryType>>) -> Self {
Self {
list,
cursor: CommonCursor::new(cur),
}
}

pub fn current(&self) -> Option<&'a G::EntryType> {
let cur = self.cursor.cur?;
// SAFETY: Objects must be kept alive while on the list.
Some(unsafe { &*cur.as_ptr() })
}

pub fn move_next(&mut self) {
self.cursor.move_next(self.list);
}
}

pub struct CursorMut<'a, G: GetLinks> {
cursor: CommonCursor<G>,
list: &'a mut RawList<G>,
}

impl<'a, G: GetLinks> CursorMut<'a, G> {
fn new(list: &'a mut RawList<G>, cur: Option<NonNull<G::EntryType>>) -> Self {
Self {
list,
cursor: CommonCursor::new(cur),
}
}

pub fn current(&mut self) -> Option<&mut G::EntryType> {
let cur = self.cursor.cur?;
// SAFETY: Objects must be kept alive while on the list.
Some(unsafe { &mut *cur.as_ptr() })
}

/// Removes the entry the cursor is pointing to and advances the cursor to the next entry. It
/// returns a raw pointer to the removed element (if one is removed).
pub fn remove_current(&mut self) -> Option<NonNull<G::EntryType>> {
let entry = self.cursor.cur?;
self.cursor.move_next(self.list);
unsafe { self.list.remove(entry.as_ref()) };
Some(entry)
}

pub fn peek_next(&mut self) -> Option<&mut G::EntryType> {
let mut new = CommonCursor::new(self.cursor.cur);
new.move_next(self.list);
// SAFETY: Objects must be kept alive while on the list.
Some(unsafe { &mut *new.cur?.as_ptr() })
}

pub fn peek_prev(&mut self) -> Option<&mut G::EntryType> {
let mut new = CommonCursor::new(self.cursor.cur);
new.move_prev(self.list);
// SAFETY: Objects must be kept alive while on the list.
Some(unsafe { &mut *new.cur?.as_ptr() })
}

pub fn move_next(&mut self) {
self.cursor.move_next(self.list);
}
}
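
// --- Illustrative sketch (not part of the patch) ---
// Direct `RawList` use (normally mediated by `linked_list::List`): the caller
// owns the entries and must keep them alive while they are on the list.
//
// struct Entry {
//     links: Links<Entry>,
// }
//
// struct EntryLinks;
//
// impl GetLinks for EntryLinks {
//     type EntryType = Entry;
//     fn get_links(e: &Entry) -> &Links<Entry> {
//         &e.links
//     }
// }
//
// fn raw_list_example(a: &Entry, b: &Entry) {
//     let mut list = RawList::<EntryLinks>::new();
//     // SAFETY: `a` and `b` outlive `list` and are not on any other list.
//     unsafe {
//         list.push_back(a);
//         list.push_back(b);
//     }
//     while list.pop_front().is_some() {}
// }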
216 changes: 216 additions & 0 deletions drivers/android/rust_binder.rs
@@ -0,0 +1,216 @@
// SPDX-License-Identifier: GPL-2.0

#![no_std]
#![feature(global_asm, try_reserve, allocator_api, concat_idents)]

use alloc::{boxed::Box, sync::Arc};
use core::{marker::PhantomData, pin::Pin, ptr};
use kernel::{
bindings, c_types, cstr,
miscdev::Registration,
prelude::*,
user_ptr::{UserSlicePtrReader, UserSlicePtrWriter},
Error,
};

mod allocation;
mod context;
mod defs;
mod linked_list;
mod node;
mod process;
mod range_alloc;
mod raw_list;
mod thread;
mod transaction;

use {context::Context, thread::Thread};

module! {
type: BinderModule,
name: b"rust_binder",
author: b"Wedson Almeida Filho",
description: b"Android Binder",
license: b"GPL v2",
params: {},
}

enum Either<L, R> {
Left(L),
Right(R),
}

trait DeliverToRead {
/// Performs work. Returns true if the remaining work items in the queue should be processed
/// immediately, or false if the thread should return to the caller before processing
/// additional work items.
fn do_work(
self: Arc<Self>,
thread: &Thread,
writer: &mut UserSlicePtrWriter,
) -> KernelResult<bool>;

/// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
/// won't be delivered.
fn cancel(self: Arc<Self>) {}

/// Returns the linked list links for the work item.
fn get_links(&self) -> &linked_list::Links<dyn DeliverToRead>;
}

impl linked_list::GetLinks for Arc<dyn DeliverToRead> {
type EntryType = dyn DeliverToRead;
fn get_links(obj: &dyn DeliverToRead) -> &linked_list::Links<dyn DeliverToRead> {
obj.get_links()
}
}

struct DeliverCode {
code: u32,
links: linked_list::Links<dyn DeliverToRead>,
}

impl DeliverCode {
fn new(code: u32) -> Self {
Self {
code,
links: linked_list::Links::new(),
}
}
}

impl DeliverToRead for DeliverCode {
fn do_work(
self: Arc<Self>,
_thread: &Thread,
writer: &mut UserSlicePtrWriter,
) -> KernelResult<bool> {
writer.write(&self.code)?;
Ok(true)
}

fn get_links(&self) -> &linked_list::Links<dyn DeliverToRead> {
&self.links
}
}

extern "C" {
#[allow(improper_ctypes)]
fn rust_helper_alloc_pages(
gfp_mask: bindings::gfp_t,
order: c_types::c_uint,
) -> *mut bindings::page;

#[allow(improper_ctypes)]
fn rust_helper_kmap(page: *mut bindings::page) -> *mut c_types::c_void;

#[allow(improper_ctypes)]
fn rust_helper_kunmap(page: *mut bindings::page);
}

/// Pages holds a reference to a set of pages of order `ORDER`. Having the order as a generic
/// const allows the struct to have the same size as a pointer.
struct Pages<const ORDER: u32> {
pages: *mut bindings::page,
}

impl<const ORDER: u32> Pages<ORDER> {
fn new() -> KernelResult<Self> {
// TODO: Consider whether we want to allow callers to specify flags.
let pages = unsafe {
rust_helper_alloc_pages(
bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
ORDER,
)
};
if pages.is_null() {
return Err(Error::ENOMEM);
}
Ok(Self { pages })
}

fn insert_page(&self, vma: &mut bindings::vm_area_struct, address: usize) -> KernelResult {
let ret = unsafe { bindings::vm_insert_page(vma, address as _, self.pages) };
if ret != 0 {
Err(Error::from_kernel_errno(ret))
} else {
Ok(())
}
}

fn copy_into_page(
&self,
reader: &mut UserSlicePtrReader,
offset: usize,
len: usize,
) -> KernelResult {
let mapping = self.kmap(0).unwrap();
unsafe { reader.read_raw((mapping.ptr as usize + offset) as _, len) }?;
Ok(())
}

unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) {
let mapping = self.kmap(0).unwrap();
ptr::copy((mapping.ptr as *mut u8).add(offset), dest, len);
}

unsafe fn write(&self, src: *const u8, offset: usize, len: usize) {
let mapping = self.kmap(0).unwrap();
ptr::copy(src, (mapping.ptr as *mut u8).add(offset), len);
}

fn kmap(&self, index: usize) -> Option<PageMapping> {
if index >= 1usize << ORDER {
return None;
}
let page = unsafe { self.pages.add(index) };
let ptr = unsafe { rust_helper_kmap(page) };
Some(PageMapping {
page,
ptr,
_phantom: PhantomData,
})
}
}

impl<const ORDER: u32> Drop for Pages<ORDER> {
fn drop(&mut self) {
unsafe { bindings::__free_pages(self.pages, ORDER) };
}
}

struct PageMapping<'a> {
page: *mut bindings::page,
ptr: *mut c_types::c_void,
_phantom: PhantomData<&'a i32>,
}

impl Drop for PageMapping<'_> {
fn drop(&mut self) {
unsafe { rust_helper_kunmap(self.page) };
}
}
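
// --- Illustrative sketch (not part of the patch) ---
// Round-tripping a byte through an order-0 `Pages` allocation:
//
// fn page_roundtrip() -> KernelResult {
//     let pages = Pages::<0>::new()?;
//     let src: u8 = 0xab;
//     let mut dst: u8 = 0;
//     // SAFETY: offset 0 and length 1 are within the single allocated page.
//     unsafe {
//         pages.write(&src as *const u8, 0, 1);
//         pages.read(&mut dst as *mut u8, 0, 1);
//     }
//     assert_eq!(dst, 0xab);
//     Ok(())
// }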

/// Rounds `value` up to the next multiple of the pointer size; e.g. on a 64-bit target,
/// `ptr_align(13) == 16`. Binder uses this to place the offsets array right after the
/// pointer-aligned transaction data.
const fn ptr_align(value: usize) -> usize {
let mask = core::mem::size_of::<usize>() - 1;
(value + mask) & !mask
}

unsafe impl Sync for BinderModule {}

struct BinderModule {
_reg: Pin<Box<Registration<Arc<Context>>>>,
}

impl KernelModule for BinderModule {
fn init() -> KernelResult<Self> {
let pinned_ctx = Context::new()?;
let ctx = unsafe { Pin::into_inner_unchecked(pinned_ctx) };
let reg = Registration::<Arc<Context>>::new_pinned::<process::Process>(
cstr!("rust_binder"),
None,
ctx,
)?;
Ok(Self { _reg: reg })
}
}
821 changes: 821 additions & 0 deletions drivers/android/thread.rs

Large diffs are not rendered by default.

207 changes: 207 additions & 0 deletions drivers/android/transaction.rs
@@ -0,0 +1,207 @@
// SPDX-License-Identifier: GPL-2.0

use alloc::sync::Arc;
use core::sync::atomic::{AtomicBool, Ordering};
use kernel::{bindings, prelude::*, sync::Ref, user_ptr::UserSlicePtrWriter};

use crate::{
defs::*,
linked_list::Links,
node::NodeRef,
process::Process,
ptr_align,
thread::{BinderResult, Thread},
DeliverToRead, Either,
};

pub(crate) struct Transaction {
// TODO: Node should be released when the buffer is released.
node_ref: Option<NodeRef>,
stack_next: Option<Arc<Transaction>>,
pub(crate) from: Arc<Thread>,
to: Ref<Process>,
free_allocation: AtomicBool,
code: u32,
flags: u32,
data_size: usize,
offsets_size: usize,
data_address: usize,
links: Links<dyn DeliverToRead>,
}

impl Transaction {
pub(crate) fn new(
node_ref: NodeRef,
stack_next: Option<Arc<Transaction>>,
from: &Arc<Thread>,
tr: &BinderTransactionData,
) -> BinderResult<Self> {
let to = node_ref.node.owner.clone();
let alloc = from.copy_transaction_data(&to, tr)?;
let data_address = alloc.ptr;
alloc.keep_alive();
Ok(Self {
node_ref: Some(node_ref),
stack_next,
from: from.clone(),
to,
code: tr.code,
flags: tr.flags,
data_size: tr.data_size as _,
data_address,
offsets_size: tr.offsets_size as _,
links: Links::new(),
free_allocation: AtomicBool::new(true),
})
}

pub(crate) fn new_reply(
from: &Arc<Thread>,
to: Ref<Process>,
tr: &BinderTransactionData,
) -> BinderResult<Self> {
let alloc = from.copy_transaction_data(&to, tr)?;
let data_address = alloc.ptr;
alloc.keep_alive();
Ok(Self {
node_ref: None,
stack_next: None,
from: from.clone(),
to,
code: tr.code,
flags: tr.flags,
data_size: tr.data_size as _,
data_address,
offsets_size: tr.offsets_size as _,
links: Links::new(),
free_allocation: AtomicBool::new(true),
})
}

/// Determines if the transaction is stacked on top of the given transaction.
pub(crate) fn is_stacked_on(&self, onext: &Option<Arc<Self>>) -> bool {
match (&self.stack_next, onext) {
(None, None) => true,
(Some(stack_next), Some(next)) => Arc::ptr_eq(stack_next, next),
_ => false,
}
}

/// Returns a pointer to the next transaction on the transaction stack, if there is one.
pub(crate) fn clone_next(&self) -> Option<Arc<Self>> {
let next = self.stack_next.as_ref()?;
Some(next.clone())
}

/// Searches in the transaction stack for a thread that belongs to the target process. This is
/// useful when finding a target for a new transaction: if the node belongs to a process that
/// is already part of the transaction stack, we reuse the thread.
fn find_target_thread(&self) -> Option<Arc<Thread>> {
let process = &self.node_ref.as_ref()?.node.owner;

let mut it = &self.stack_next;
while let Some(transaction) = it {
if Ref::ptr_eq(&transaction.from.process, process) {
return Some(transaction.from.clone());
}
it = &transaction.stack_next;
}
None
}

/// Searches in the transaction stack for a transaction originating at the given thread.
pub(crate) fn find_from(&self, thread: &Thread) -> Option<Arc<Transaction>> {
let mut it = &self.stack_next;
while let Some(transaction) = it {
if core::ptr::eq(thread, transaction.from.as_ref()) {
return Some(transaction.clone());
}

it = &transaction.stack_next;
}
None
}

/// Submits the transaction to a work queue. Use a thread if there is one in the transaction
/// stack, otherwise use the destination process.
pub(crate) fn submit(self: Arc<Self>) -> BinderResult {
if let Some(thread) = self.find_target_thread() {
thread.push_work(self)
} else {
let process = self.to.clone();
process.push_work(self)
}
}
}

impl DeliverToRead for Transaction {
fn do_work(
self: Arc<Self>,
thread: &Thread,
writer: &mut UserSlicePtrWriter,
) -> KernelResult<bool> {
/* TODO: Initialise the following fields from tr:
pub sender_pid: pid_t,
pub sender_euid: uid_t,
*/
let mut tr = BinderTransactionData::default();

if let Some(nref) = &self.node_ref {
let (ptr, cookie) = nref.node.get_id();
tr.target.ptr = ptr as _;
tr.cookie = cookie as _;
};

tr.code = self.code;
tr.flags = self.flags;
tr.data_size = self.data_size as _;
tr.data.ptr.buffer = self.data_address as _;
tr.offsets_size = self.offsets_size as _;
if tr.offsets_size > 0 {
tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size)) as _;
}

// When `drop` is called, we don't want the allocation to be freed because it is now the
// user's responsibility to free it.
self.free_allocation.store(false, Ordering::Relaxed);

let code = if self.node_ref.is_none() {
BR_REPLY
} else {
BR_TRANSACTION
};

// Write the transaction code and data to the user buffer. On failure we complete the
// transaction with an error.
if let Err(err) = writer.write(&code).and_then(|_| writer.write(&tr)) {
let reply = Either::Right(BR_FAILED_REPLY);
self.from.deliver_reply(reply, &self);
return Err(err);
}

// When this is not a reply and not an async transaction, update `current_transaction`. If
// it's a reply, `current_transaction` has already been updated appropriately.
if self.node_ref.is_some() && tr.flags & bindings::transaction_flags_TF_ONE_WAY == 0 {
thread.set_current_transaction(self);
}

Ok(false)
}

fn cancel(self: Arc<Self>) {
let reply = Either::Right(BR_DEAD_REPLY);
self.from.deliver_reply(reply, &self);
}

fn get_links(&self) -> &Links<dyn DeliverToRead> {
&self.links
}
}

impl Drop for Transaction {
fn drop(&mut self) {
if self.free_allocation.load(Ordering::Relaxed) {
self.to.buffer_get(self.data_address);
}
}
}
22 changes: 12 additions & 10 deletions include/uapi/linux/android/binder.h
@@ -217,16 +217,18 @@ struct binder_node_info_for_ref {
__u32 reserved3;
};

-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
-#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
-#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
+enum {
+	BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
+	BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
+	BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
+	BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
+	BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
+	BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
+	BINDER_VERSION = _IOWR('b', 9, struct binder_version),
+	BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
+	BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
+	BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
+};
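
/*
 * Note: switching these ioctl numbers from #define to an enum lets bindgen
 * emit them as Rust constants for the driver above; bindgen does not expand
 * function-like macros such as _IOWR().
 */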

/*
* NOTE: Two special error codes you should check for when calling
26 changes: 26 additions & 0 deletions rust/helpers.c
@@ -4,6 +4,8 @@
#include <linux/build_bug.h>
#include <linux/uaccess.h>
#include <linux/sched/signal.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

void rust_helper_BUG(void)
{
@@ -49,12 +51,36 @@ void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
}
EXPORT_SYMBOL(rust_helper_init_wait);

int rust_helper_current_pid(void)
{
return current->pid;
}
EXPORT_SYMBOL(rust_helper_current_pid);

int rust_helper_signal_pending(void)
{
return signal_pending(current);
}
EXPORT_SYMBOL(rust_helper_signal_pending);

struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(rust_helper_alloc_pages);

void *rust_helper_kmap(struct page *page)
{
return kmap(page);
}
EXPORT_SYMBOL(rust_helper_kmap);

void rust_helper_kunmap(struct page *page)
{
return kunmap(page);
}
EXPORT_SYMBOL(rust_helper_kunmap);

// See https://github.com/rust-lang/rust-bindgen/issues/1671
static_assert(__builtin_types_compatible_p(size_t, uintptr_t),
"size_t must match uintptr_t, what architecture is this??");
1 change: 1 addition & 0 deletions rust/kernel/bindings.rs
@@ -19,3 +19,4 @@ pub use bindings_raw::*;

pub const GFP_KERNEL: gfp_t = BINDINGS_GFP_KERNEL;
pub const __GFP_ZERO: gfp_t = BINDINGS___GFP_ZERO;
pub const __GFP_HIGHMEM: gfp_t = ___GFP_HIGHMEM;
2 changes: 2 additions & 0 deletions rust/kernel/bindings_helper.h
@@ -10,6 +10,8 @@
#include <linux/version.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <uapi/linux/android/binder.h>

// `bindgen` gets confused at certain things
const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
120 changes: 116 additions & 4 deletions rust/kernel/file_operations.rs
@@ -5,15 +5,15 @@
//! C header: [`include/linux/fs.h`](../../../../include/linux/fs.h)

use core::convert::{TryFrom, TryInto};
use core::{marker, mem, ptr};
use core::{marker, mem, ops::Deref, pin::Pin, ptr};

use alloc::boxed::Box;
use alloc::sync::Arc;

use crate::bindings;
use crate::c_types;
use crate::error::{Error, KernelResult};
use crate::sync::{Ref, RefCounted};
use crate::sync::{CondVar, Ref, RefCounted};
use crate::user_ptr::{UserSlicePtr, UserSlicePtrReader, UserSlicePtrWriter};

/// Wraps the kernel's `struct file`.
@@ -49,6 +49,50 @@ impl File {
}
}

/// Wraps the kernel's `struct poll_table_struct`.
///
/// # Invariants
///
/// The pointer [`PollTable::ptr`] is null or valid.
pub struct PollTable {
ptr: *mut bindings::poll_table_struct,
}

impl PollTable {
/// Constructs a new `struct poll_table_struct` wrapper.
///
/// # Safety
///
/// The pointer `ptr` must be either null or a valid pointer for the lifetime of the object.
unsafe fn from_ptr(ptr: *mut bindings::poll_table_struct) -> Self {
Self { ptr }
}

/// Associates the given file and condition variable with this poll table. This means that
/// notifying the condition variable will also notify the poll table; additionally, the association
/// between the condition variable and the file will automatically be undone by the kernel when
/// the file is destructed. To unilaterally remove the association before then, one can call
/// [`CondVar::free_waiters`].
///
/// # Safety
///
/// If the condition variable is destroyed before the file, then [`CondVar::free_waiters`] must
/// be called to ensure that all waiters are flushed out.
pub unsafe fn register_wait<'a>(&self, file: &'a File, cv: &'a CondVar) {
if self.ptr.is_null() {
return;
}

// SAFETY: `PollTable::ptr` is guaranteed to be valid by the type invariants and the null
// check above.
let table = &*self.ptr;
if let Some(proc) = table._qproc {
// SAFETY: All pointers are known to be valid.
proc(file.ptr as _, cv.wait_list.get(), self.ptr)
}
}
}
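
// --- Illustrative sketch (not part of the patch) ---
// A driver's `poll`, as it would appear in a `FileOperations` impl, built on
// `PollTable` and `CondVar`. `MyDev`, its fields and the fully qualified
// atomics are illustrative; waking `cv` later also wakes any pollers
// registered here.
//
// struct MyDev {
//     cv: CondVar,
//     ready: core::sync::atomic::AtomicBool,
// }
//
// impl MyDev {
//     fn poll(&self, file: &File, table: &PollTable) -> KernelResult<u32> {
//         // SAFETY: `self.cv` lives at least as long as `file`, or
//         // `CondVar::free_waiters` is called before the condvar is destroyed,
//         // as `register_wait` requires.
//         unsafe { table.register_wait(file, &self.cv) };
//         let mut mask = bindings::POLLOUT | bindings::POLLWRNORM;
//         if self.ready.load(core::sync::atomic::Ordering::Relaxed) {
//             mask |= bindings::POLLIN | bindings::POLLRDNORM;
//         }
//         Ok(mask)
//     }
// }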

/// Equivalent to [`std::io::SeekFrom`].
///
/// [`std::io::SeekFrom`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
@@ -183,6 +227,17 @@ unsafe extern "C" fn compat_ioctl_callback<T: FileOperations>(
}
}

unsafe extern "C" fn mmap_callback<T: FileOperations>(
file: *mut bindings::file,
vma: *mut bindings::vm_area_struct,
) -> c_types::c_int {
from_kernel_result! {
let f = &*((*file).private_data as *const T);
f.mmap(&File::from_ptr(file), &mut *vma)?;
Ok(0)
}
}

unsafe extern "C" fn fsync_callback<T: FileOperations>(
file: *mut bindings::file,
start: bindings::loff_t,
@@ -199,6 +254,17 @@ unsafe extern "C" fn fsync_callback<T: FileOperations>(
}
}

unsafe extern "C" fn poll_callback<T: FileOperations>(
file: *mut bindings::file,
wait: *mut bindings::poll_table_struct,
) -> bindings::__poll_t {
let f = &*((*file).private_data as *const T);
match f.poll(&File::from_ptr(file), &PollTable::from_ptr(wait)) {
Ok(v) => v,
Err(_) => bindings::POLLERR,
}
}

pub(crate) struct FileOperationsVtable<A, T>(marker::PhantomData<A>, marker::PhantomData<T>);

impl<A: FileOpenAdapter, T: FileOpener<A::Arg>> FileOperationsVtable<A, T> {
@@ -243,10 +309,18 @@ impl<A: FileOpenAdapter, T: FileOpener<A::Arg>> FileOperationsVtable<A, T> {
iterate_shared: None,
iopoll: None,
lock: None,
-	mmap: None,
+	mmap: if T::TO_USE.mmap {
+		Some(mmap_callback::<T>)
+	} else {
+		None
+	},
mmap_supported_flags: 0,
owner: ptr::null_mut(),
-	poll: None,
+	poll: if T::TO_USE.poll {
+		Some(poll_callback::<T>)
+	} else {
+		None
+	},
read_iter: None,
remap_file_range: None,
sendpage: None,
@@ -291,6 +365,12 @@ pub struct ToUse {

/// The `fsync` field of [`struct file_operations`].
pub fsync: bool,

/// The `mmap` field of [`struct file_operations`].
pub mmap: bool,

/// The `poll` field of [`struct file_operations`].
pub poll: bool,
}

/// A constant version where all values are to set to `false`, that is, all supported fields will
@@ -302,6 +382,8 @@ pub const USE_NONE: ToUse = ToUse {
ioctl: false,
compat_ioctl: false,
fsync: false,
mmap: false,
poll: false,
};

/// Defines the [`FileOperations::TO_USE`] field based on a list of fields to be populated.
@@ -506,6 +588,22 @@ pub trait FileOperations: Send + Sync + Sized {
fn fsync(&self, _file: &File, _start: u64, _end: u64, _datasync: bool) -> KernelResult<u32> {
Err(Error::EINVAL)
}

/// Maps areas of the caller's virtual address space to device/file memory.
///
/// Corresponds to the `mmap` function pointer in `struct file_operations`.
/// TODO: wrap `vm_area_struct` so that we don't have to expose it.
fn mmap(&self, _file: &File, _vma: &mut bindings::vm_area_struct) -> KernelResult {
Err(Error::EINVAL)
}

/// Checks the state of the file and optionally registers for notification when the state
/// changes.
///
/// Corresponds to the `poll` function pointer in `struct file_operations`.
fn poll(&self, _file: &File, _table: &PollTable) -> KernelResult<u32> {
Ok(bindings::POLLIN | bindings::POLLOUT | bindings::POLLRDNORM | bindings::POLLWRNORM)
}
}

/// Used to convert an object into a raw pointer that represents it.
@@ -554,3 +652,17 @@ impl<T> PointerWrapper<T> for Arc<T> {
Arc::from_raw(ptr)
}
}

impl<T, W: PointerWrapper<T> + Deref> PointerWrapper<T> for Pin<W> {
fn into_pointer(self) -> *const T {
// SAFETY: We continue to treat the pointer as pinned by returning just a pointer to it to
// the caller.
let inner = unsafe { Pin::into_inner_unchecked(self) };
inner.into_pointer()
}

unsafe fn from_pointer(p: *const T) -> Self {
// SAFETY: The object was originally pinned.
Pin::new_unchecked(W::from_pointer(p))
}
}
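
// --- Illustrative sketch (not part of the patch) ---
// Round-tripping a pinned box through `PointerWrapper`; the pin is upheld
// because the pointee is never moved while it exists only as a raw pointer.
//
// fn pin_roundtrip(data: Pin<Box<u32>>) -> Pin<Box<u32>> {
//     let raw = data.into_pointer();
//     // SAFETY: `raw` came from `into_pointer` of the same wrapper type.
//     unsafe { <Pin<Box<u32>> as PointerWrapper<u32>>::from_pointer(raw) }
// }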