diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2c8bb34..6c1daaf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,16 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](http://keepachangelog.com/)
 and this project adheres to [Semantic Versioning](http://semver.org/).
 
+## [Unreleased]
+
+### Added
+
+- Added a Two-Level Segregated Fit heap with the `tlsf` feature.
+
+### Changed
+
+- The `Heap` struct has been renamed to `LlffHeap` and requires the `llff` feature.
+
 ## [v0.5.1] - 2023-11-04
 
 ### Added
diff --git a/Cargo.toml b/Cargo.toml
index 247517c..086fcf6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -38,7 +38,6 @@ linked_list_allocator = { version = "0.10.5", default-features = false, optional
 rlsf = { version = "0.2.1", default-features = false, optional = true }
 const-default = { version = "1.0.0", default-features = false, optional = true }
 
-
 [dev-dependencies]
 cortex-m = { version = "0.7.6", features = ["critical-section-single-core"] }
 cortex-m-rt = "0.7"
diff --git a/README.md b/README.md
index a34cb15..bbe9eb9 100644
--- a/README.md
+++ b/README.md
@@ -48,9 +48,17 @@ For a full usage example, see [`examples/global_alloc.rs`](https://github.com/ru
 
 For this to work, an implementation of [`critical-section`](https://github.com/rust-embedded/critical-section) must be provided.
 
-For simple use cases you may enable the `critical-section-single-core` feature in the [cortex-m](https://github.com/rust-embedded/cortex-m) crate.
+For simple use cases with Cortex-M CPUs, you may enable the `critical-section-single-core` feature in the [cortex-m](https://github.com/rust-embedded/cortex-m) crate.
 Please refer to the documentation of [`critical-section`](https://docs.rs/critical-section) for further guidance.
 
+## Features
+
+There are two heaps available to use:
+
+* `llff`: Provides `LlffHeap`, a Linked List First Fit heap.
+* `tlsf`: Provides `TlsfHeap`, a Two-Level Segregated Fit heap.
+
+The best heap to use will depend on your application; see [#78](https://github.com/rust-embedded/embedded-alloc/pull/78) for more discussion.
 
 ## License
 
diff --git a/src/lib.rs b/src/lib.rs
index 33ac654..4308790 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,244 +1,14 @@
 #![doc = include_str!("../README.md")]
 #![no_std]
 #![cfg_attr(feature = "allocator_api", feature(allocator_api, alloc_layout_extra))]
-
-use core::alloc::{GlobalAlloc, Layout};
-use core::cell::RefCell;
-use core::ptr::{self, NonNull};
-
-use critical_section::Mutex;
+#![warn(missing_docs)]
 
 #[cfg(feature = "llff")]
-pub use llff::Heap as LlffHeap;
+mod llff;
 #[cfg(feature = "tlsf")]
-pub use tlsf::Heap as TlsfHeap;
+mod tlsf;
 
 #[cfg(feature = "llff")]
-mod llff {
-    use super::*;
-    use linked_list_allocator::Heap as LLHeap;
-
-    pub struct Heap {
-        heap: Mutex<RefCell<LLHeap>>,
-    }
-
-    impl Heap {
-        /// Create a new UNINITIALIZED heap allocator
-        ///
-        /// You must initialize this heap using the
-        /// [`init`](Self::init) method before using the allocator.
-        pub const fn empty() -> Heap {
-            Heap {
-                heap: Mutex::new(RefCell::new(LLHeap::empty())),
-            }
-        }
-
-        /// Initializes the heap
-        ///
-        /// This function must be called BEFORE you run any code that makes use of the
-        /// allocator.
-        ///
-        /// `start_addr` is the address where the heap will be located.
-        ///
-        /// `size` is the size of the heap in bytes.
-        ///
-        /// Note that:
-        ///
-        /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will
-        ///   be the smallest address used.
-        ///
-        /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is
-        ///   `0x1000` and `size` is `0x30000` then the allocator won't use memory at
-        ///   addresses `0x31000` and larger.
-        ///
-        /// # Safety
-        ///
-        /// Obey these or Bad Stuff will happen.
-        ///
-        /// - This function must be called exactly ONCE.
-        /// - `size > 0`
-        pub unsafe fn init(&self, start_addr: usize, size: usize) {
-            critical_section::with(|cs| {
-                self.heap
-                    .borrow(cs)
-                    .borrow_mut()
-                    .init(start_addr as *mut u8, size);
-            });
-        }
-
-        /// Returns an estimate of the amount of bytes in use.
-        pub fn used(&self) -> usize {
-            critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().used())
-        }
-
-        /// Returns an estimate of the amount of bytes available.
-        pub fn free(&self) -> usize {
-            critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().free())
-        }
-
-        fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
-            critical_section::with(|cs| {
-                self.heap
-                    .borrow(cs)
-                    .borrow_mut()
-                    .allocate_first_fit(layout)
-                    .ok()
-            })
-        }
-
-        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-            critical_section::with(|cs| {
-                self.heap
-                    .borrow(cs)
-                    .borrow_mut()
-                    .deallocate(NonNull::new_unchecked(ptr), layout)
-            });
-        }
-    }
-
-    unsafe impl GlobalAlloc for Heap {
-        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-            self.alloc(layout)
-                .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
-        }
-
-        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-            self.dealloc(ptr, layout);
-        }
-    }
-
-    #[cfg(feature = "allocator_api")]
-    mod allocator_api {
-        use super::*;
-        use core::{
-            alloc::{AllocError, Allocator, Layout},
-            ptr::NonNull,
-        };
-
-        unsafe impl Allocator for Heap {
-            fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
-                match layout.size() {
-                    0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
-                    size => self.alloc(layout).map_or(Err(AllocError), |allocation| {
-                        Ok(NonNull::slice_from_raw_parts(allocation, size))
-                    }),
-                }
-            }
-
-            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
-                if layout.size() != 0 {
-                    self.dealloc(ptr.as_ptr(), layout);
-                }
-            }
-        }
-    }
-}
-
+pub use llff::Heap as LlffHeap;
 #[cfg(feature = "tlsf")]
-mod tlsf {
-    use super::*;
-    use const_default::ConstDefault;
-    use rlsf::Tlsf;
-
-    type TlsfHeap = Tlsf<'static, usize, usize, { usize::BITS as usize }, { usize::BITS as usize }>;
-
-    pub struct Heap {
-        heap: Mutex<RefCell<TlsfHeap>>,
-    }
-
-    impl Heap {
-        /// Create a new UNINITIALIZED heap allocator
-        ///
-        /// You must initialize this heap using the
-        /// [`init`](Self::init) method before using the allocator.
-        pub const fn empty() -> Heap {
-            Heap {
-                heap: Mutex::new(RefCell::new(ConstDefault::DEFAULT)),
-            }
-        }
-
-        /// Initializes the heap
-        ///
-        /// This function must be called BEFORE you run any code that makes use of the
-        /// allocator.
-        ///
-        /// `start_addr` is the address where the heap will be located.
-        ///
-        /// `size` is the size of the heap in bytes.
-        ///
-        /// Note that:
-        ///
-        /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will
-        ///   be the smallest address used.
-        ///
-        /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is
-        ///   `0x1000` and `size` is `0x30000` then the allocator won't use memory at
-        ///   addresses `0x31000` and larger.
-        ///
-        /// # Safety
-        ///
-        /// Obey these or Bad Stuff will happen.
-        ///
-        /// - This function must be called exactly ONCE.
-        /// - `size > 0`
-        pub unsafe fn init(&self, start_addr: usize, size: usize) {
-            critical_section::with(|cs| {
-                let block: &[u8] = core::slice::from_raw_parts(start_addr as *const u8, size);
-                self.heap
-                    .borrow(cs)
-                    .borrow_mut()
-                    .insert_free_block_ptr(block.into());
-            });
-        }
-
-        fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
-            critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().allocate(layout))
-        }
-
-        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-            critical_section::with(|cs| {
-                self.heap
-                    .borrow(cs)
-                    .borrow_mut()
-                    .deallocate(NonNull::new_unchecked(ptr), layout.align())
-            })
-        }
-    }
-
-    unsafe impl GlobalAlloc for Heap {
-        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-            self.alloc(layout)
-                .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
-        }
-
-        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-            self.dealloc(ptr, layout)
-        }
-    }
-
-    #[cfg(feature = "allocator_api")]
-    mod allocator_api {
-        use super::*;
-        use core::{
-            alloc::{AllocError, Allocator, Layout},
-            ptr::NonNull,
-        };
-
-        unsafe impl Allocator for Heap {
-            fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
-                match layout.size() {
-                    0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
-                    size => self.alloc(layout).map_or(Err(AllocError), |allocation| {
-                        Ok(NonNull::slice_from_raw_parts(allocation, size))
-                    }),
-                }
-            }
-
-            unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
-                if layout.size() != 0 {
-                    self.dealloc(ptr.as_ptr(), layout);
-                }
-            }
-        }
-    }
-}
+pub use tlsf::Heap as TlsfHeap;
diff --git a/src/llff.rs b/src/llff.rs
new file mode 100644
index 0000000..d618a6a
--- /dev/null
+++ b/src/llff.rs
@@ -0,0 +1,122 @@
+use core::alloc::{GlobalAlloc, Layout};
+use core::cell::RefCell;
+use core::ptr::{self, NonNull};
+
+use critical_section::Mutex;
+use linked_list_allocator::Heap as LLHeap;
+
+/// A Linked List First Fit heap.
+pub struct Heap {
+    heap: Mutex<RefCell<LLHeap>>,
+}
+
+impl Heap {
+    /// Create a new UNINITIALIZED heap allocator
+    ///
+    /// You must initialize this heap using the
+    /// [`init`](Self::init) method before using the allocator.
+    pub const fn empty() -> Heap {
+        Heap {
+            heap: Mutex::new(RefCell::new(LLHeap::empty())),
+        }
+    }
+
+    /// Initializes the heap
+    ///
+    /// This function must be called BEFORE you run any code that makes use of the
+    /// allocator.
+    ///
+    /// `start_addr` is the address where the heap will be located.
+    ///
+    /// `size` is the size of the heap in bytes.
+    ///
+    /// Note that:
+    ///
+    /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will
+    ///   be the smallest address used.
+    ///
+    /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is
+    ///   `0x1000` and `size` is `0x30000` then the allocator won't use memory at
+    ///   addresses `0x31000` and larger.
+    ///
+    /// # Safety
+    ///
+    /// Obey these or Bad Stuff will happen.
+    ///
+    /// - This function must be called exactly ONCE.
+    /// - `size > 0`
+    pub unsafe fn init(&self, start_addr: usize, size: usize) {
+        critical_section::with(|cs| {
+            self.heap
+                .borrow(cs)
+                .borrow_mut()
+                .init(start_addr as *mut u8, size);
+        });
+    }
+
+    /// Returns an estimate of the number of bytes in use.
+    pub fn used(&self) -> usize {
+        critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().used())
+    }
+
+    /// Returns an estimate of the number of bytes available.
+    pub fn free(&self) -> usize {
+        critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().free())
+    }
+
+    fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
+        critical_section::with(|cs| {
+            self.heap
+                .borrow(cs)
+                .borrow_mut()
+                .allocate_first_fit(layout)
+                .ok()
+        })
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        critical_section::with(|cs| {
+            self.heap
+                .borrow(cs)
+                .borrow_mut()
+                .deallocate(NonNull::new_unchecked(ptr), layout)
+        });
+    }
+}
+
+unsafe impl GlobalAlloc for Heap {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        self.alloc(layout)
+            .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        self.dealloc(ptr, layout);
+    }
+}
+
+#[cfg(feature = "allocator_api")]
+mod allocator_api {
+    use super::*;
+    use core::{
+        alloc::{AllocError, Allocator, Layout},
+        ptr::NonNull,
+    };
+
+    unsafe impl Allocator for Heap {
+        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            match layout.size() {
+                0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+                size => self.alloc(layout).map_or(Err(AllocError), |allocation| {
+                    Ok(NonNull::slice_from_raw_parts(allocation, size))
+                }),
+            }
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            if layout.size() != 0 {
+                self.dealloc(ptr.as_ptr(), layout);
+            }
+        }
+    }
+}
diff --git a/src/tlsf.rs b/src/tlsf.rs
new file mode 100644
index 0000000..3a31cfd
--- /dev/null
+++ b/src/tlsf.rs
@@ -0,0 +1,110 @@
+use core::alloc::{GlobalAlloc, Layout};
+use core::cell::RefCell;
+use core::ptr::{self, NonNull};
+
+use const_default::ConstDefault;
+use critical_section::Mutex;
+use rlsf::Tlsf;
+
+type TlsfHeap = Tlsf<'static, usize, usize, { usize::BITS as usize }, { usize::BITS as usize }>;
+
+/// A Two-Level Segregated Fit heap.
+pub struct Heap {
+    heap: Mutex<RefCell<TlsfHeap>>,
+}
+
+impl Heap {
+    /// Create a new UNINITIALIZED heap allocator
+    ///
+    /// You must initialize this heap using the
+    /// [`init`](Self::init) method before using the allocator.
+    pub const fn empty() -> Heap {
+        Heap {
+            heap: Mutex::new(RefCell::new(ConstDefault::DEFAULT)),
+        }
+    }
+
+    /// Initializes the heap
+    ///
+    /// This function must be called BEFORE you run any code that makes use of the
+    /// allocator.
+    ///
+    /// `start_addr` is the address where the heap will be located.
+    ///
+    /// `size` is the size of the heap in bytes.
+    ///
+    /// Note that:
+    ///
+    /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will
+    ///   be the smallest address used.
+    ///
+    /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is
+    ///   `0x1000` and `size` is `0x30000` then the allocator won't use memory at
+    ///   addresses `0x31000` and larger.
+    ///
+    /// # Safety
+    ///
+    /// Obey these or Bad Stuff will happen.
+    ///
+    /// - This function must be called exactly ONCE.
+    /// - `size > 0`
+    pub unsafe fn init(&self, start_addr: usize, size: usize) {
+        critical_section::with(|cs| {
+            let block: &[u8] = core::slice::from_raw_parts(start_addr as *const u8, size);
+            self.heap
+                .borrow(cs)
+                .borrow_mut()
+                .insert_free_block_ptr(block.into());
+        });
+    }
+
+    fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
+        critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().allocate(layout))
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        critical_section::with(|cs| {
+            self.heap
+                .borrow(cs)
+                .borrow_mut()
+                .deallocate(NonNull::new_unchecked(ptr), layout.align())
+        })
+    }
+}
+
+unsafe impl GlobalAlloc for Heap {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        self.alloc(layout)
+            .map_or(ptr::null_mut(), |allocation| allocation.as_ptr())
+    }
+
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        self.dealloc(ptr, layout)
+    }
+}
+
+#[cfg(feature = "allocator_api")]
+mod allocator_api {
+    use super::*;
+    use core::{
+        alloc::{AllocError, Allocator, Layout},
+        ptr::NonNull,
+    };
+
+    unsafe impl Allocator for Heap {
+        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            match layout.size() {
+                0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+                size => self.alloc(layout).map_or(Err(AllocError), |allocation| {
+                    Ok(NonNull::slice_from_raw_parts(allocation, size))
+                }),
+            }
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            if layout.size() != 0 {
+                self.dealloc(ptr.as_ptr(), layout);
+            }
+        }
+    }
+}
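
For reviewers who want to try the renamed API: a minimal usage sketch in the spirit of `examples/global_alloc.rs`, using the `llff` feature. The `panic_halt` handler and the 1 KiB heap size are illustrative assumptions here, not part of this patch.

```rust
#![no_std]
#![no_main]

extern crate alloc;

use alloc::vec::Vec;
use core::mem::MaybeUninit;
use core::ptr::addr_of_mut;
use cortex_m_rt::entry;
use embedded_alloc::LlffHeap as Heap;
use panic_halt as _; // assumed panic handler, any will do

#[global_allocator]
static HEAP: Heap = Heap::empty();

#[entry]
fn main() -> ! {
    // Initialize the allocator BEFORE anything allocates, exactly once.
    {
        const HEAP_SIZE: usize = 1024; // arbitrary size for this sketch
        static mut HEAP_MEM: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
        unsafe { HEAP.init(addr_of_mut!(HEAP_MEM) as usize, HEAP_SIZE) }
    }

    // Heap-backed collections now work. `LlffHeap` additionally exposes
    // `used()` and `free()` estimates; the TLSF heap does not in this patch.
    let mut xs: Vec<u32> = Vec::new();
    xs.push(1);

    loop {}
}
```

Swapping in the TLSF heap is a one-line change: enable the `tlsf` feature and import `embedded_alloc::TlsfHeap as Heap` instead.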
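Both new modules also implement `core::alloc::Allocator` behind the `allocator_api` feature, so on a nightly toolchain a heap can be used as an explicit, allocator-parameterized allocator as well as the global one. A sketch under the same assumptions as above (plus the crate's `tlsf` and `allocator_api` features):

```rust
#![no_std]
#![no_main]
#![feature(allocator_api)] // nightly-only, mirrors the crate's `allocator_api` feature

extern crate alloc;

use alloc::boxed::Box;
use core::mem::MaybeUninit;
use core::ptr::addr_of_mut;
use cortex_m_rt::entry;
use embedded_alloc::TlsfHeap as Heap;
use panic_halt as _; // assumed panic handler

// Installed globally too, since linking `alloc` in a no_std binary requires a
// `#[global_allocator]`; a program could keep further independent heaps.
#[global_allocator]
static HEAP: Heap = Heap::empty();

#[entry]
fn main() -> ! {
    {
        const HEAP_SIZE: usize = 1024; // arbitrary size for this sketch
        static mut HEAP_MEM: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
        unsafe { HEAP.init(addr_of_mut!(HEAP_MEM) as usize, HEAP_SIZE) }
    }

    // Allocator-parameterized allocation: `&HEAP` implements `Allocator`,
    // so the unstable `_in` constructors accept it directly.
    let boxed = Box::new_in(42u32, &HEAP);
    let _ = *boxed;

    loop {}
}
```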