diff --git a/src/multimap_table.rs b/src/multimap_table.rs
index ca037e6f..83bdd6cc 100644
--- a/src/multimap_table.rs
+++ b/src/multimap_table.rs
@@ -4,8 +4,8 @@ use crate::sealed::Sealed;
 use crate::table::{ReadableTableMetadata, TableStats};
 use crate::tree_store::{
     btree_stats, AllPageNumbersBtreeIter, BranchAccessor, BranchMutator, Btree, BtreeHeader,
-    BtreeMut, BtreeRangeIter, BtreeStats, CachePriority, Checksum, LeafAccessor, LeafMutator, Page,
-    PageHint, PageNumber, PagePath, RawBtree, RawLeafBuilder, TransactionalMemory, UntypedBtree,
+    BtreeMut, BtreeRangeIter, BtreeStats, Checksum, LeafAccessor, LeafMutator, Page, PageHint,
+    PageNumber, PagePath, RawBtree, RawLeafBuilder, TransactionalMemory, UntypedBtree,
     UntypedBtreeMut, BRANCH, DEFERRED, LEAF, MAX_PAIR_LENGTH, MAX_VALUE_LENGTH,
 };
 use crate::types::{Key, TypeName, Value};
@@ -1057,7 +1057,7 @@ impl<'txn, K: Key + 'static, V: Key + 'static> MultimapTable<'txn, K, V> {
                 .insert(key.borrow(), &DynamicCollection::new(&inline_data))?;
         } else {
             // convert into a subtree
-            let mut page = self.mem.allocate(leaf_data.len(), CachePriority::Low)?;
+            let mut page = self.mem.allocate(leaf_data.len())?;
             page.memory_mut()[..leaf_data.len()].copy_from_slice(leaf_data);
             let page_number = page.get_page_number();
             drop(page);
diff --git a/src/transactions.rs b/src/transactions.rs
index 23662e68..6f59dada 100644
--- a/src/transactions.rs
+++ b/src/transactions.rs
@@ -5,9 +5,9 @@ use crate::sealed::Sealed;
 use crate::table::ReadOnlyUntypedTable;
 use crate::transaction_tracker::{SavepointId, TransactionId, TransactionTracker};
 use crate::tree_store::{
-    Btree, BtreeHeader, BtreeMut, CachePriority, FreedPageList, FreedTableKey,
-    InternalTableDefinition, Page, PageHint, PageNumber, SerializedSavepoint, TableTree,
-    TableTreeMut, TableType, TransactionalMemory, MAX_PAIR_LENGTH, MAX_VALUE_LENGTH,
+    Btree, BtreeHeader, BtreeMut, FreedPageList, FreedTableKey, InternalTableDefinition, Page,
+    PageHint, PageNumber, SerializedSavepoint, TableTree, TableTreeMut, TableType,
+    TransactionalMemory, MAX_PAIR_LENGTH, MAX_VALUE_LENGTH,
 };
 use crate::types::{Key, Value};
 use crate::{
@@ -1177,10 +1177,7 @@ impl WriteTransaction {
                 continue;
             }
             let old_page = self.mem.get_page(path.page_number())?;
-            let mut new_page = self.mem.allocate_lowest(
-                old_page.memory().len(),
-                CachePriority::default_btree(old_page.memory()),
-            )?;
+            let mut new_page = self.mem.allocate_lowest(old_page.memory().len())?;
             let new_page_number = new_page.get_page_number();
             // We have to copy at least the page type into the new page.
             // Otherwise its cache priority will be calculated incorrectly
@@ -1194,10 +1191,7 @@
                 continue;
             }
             let old_parent = self.mem.get_page(*parent)?;
-            let mut new_page = self.mem.allocate_lowest(
-                old_parent.memory().len(),
-                CachePriority::default_btree(old_parent.memory()),
-            )?;
+            let mut new_page = self.mem.allocate_lowest(old_parent.memory().len())?;
             let new_page_number = new_page.get_page_number();
             // We have to copy at least the page type into the new page.
             // Otherwise its cache priority will be calculated incorrectly
diff --git a/src/tree_store/btree_base.rs b/src/tree_store/btree_base.rs
index 4aca80e4..0bd73d94 100644
--- a/src/tree_store/btree_base.rs
+++ b/src/tree_store/btree_base.rs
@@ -1,6 +1,4 @@
-use crate::tree_store::page_store::{
-    xxh3_checksum, CachePriority, Page, PageImpl, PageMut, TransactionalMemory,
-};
+use crate::tree_store::page_store::{xxh3_checksum, Page, PageImpl, PageMut, TransactionalMemory};
 use crate::tree_store::PageNumber;
 use crate::types::{Key, MutInPlaceValue, Value};
 use crate::{Result, StorageError};
@@ -555,7 +553,7 @@ impl<'a, 'b> LeafBuilder<'a, 'b> {
         let required_size =
             self.required_bytes(division, first_split_key_bytes + first_split_value_bytes);
-        let mut page1 = self.mem.allocate(required_size, CachePriority::Low)?;
+        let mut page1 = self.mem.allocate(required_size)?;
         let mut builder = RawLeafBuilder::new(
             page1.memory_mut(),
             division,
@@ -574,7 +572,7 @@ impl<'a, 'b> LeafBuilder<'a, 'b> {
                 - first_split_key_bytes
                 - first_split_value_bytes,
         );
-        let mut page2 = self.mem.allocate(required_size, CachePriority::Low)?;
+        let mut page2 = self.mem.allocate(required_size)?;
         let mut builder = RawLeafBuilder::new(
             page2.memory_mut(),
             self.pairs.len() - division,
@@ -595,7 +593,7 @@ impl<'a, 'b> LeafBuilder<'a, 'b> {
             self.pairs.len(),
             self.total_key_bytes + self.total_value_bytes,
         );
-        let mut page = self.mem.allocate(required_size, CachePriority::Low)?;
+        let mut page = self.mem.allocate(required_size)?;
         let mut builder = RawLeafBuilder::new(
             page.memory_mut(),
             self.pairs.len(),
@@ -1306,7 +1304,7 @@ impl<'a, 'b> BranchBuilder<'a, 'b> {
             self.total_key_bytes,
             self.fixed_key_size,
         );
-        let mut page = self.mem.allocate(size, CachePriority::High)?;
+        let mut page = self.mem.allocate(size)?;
         let mut builder = RawBranchBuilder::new(&mut page, self.keys.len(), self.fixed_key_size);
         builder.write_first_page(self.children[0].0, self.children[0].1);
         for i in 1..self.children.len() {
@@ -1337,7 +1335,7 @@ impl<'a, 'b> BranchBuilder<'a, 'b> {
         let size =
             RawBranchBuilder::required_bytes(division, first_split_key_len, self.fixed_key_size);
-        let mut page1 = self.mem.allocate(size, CachePriority::High)?;
+        let mut page1 = self.mem.allocate(size)?;
         let mut builder = RawBranchBuilder::new(&mut page1, division, self.fixed_key_size);
         builder.write_first_page(self.children[0].0, self.children[0].1);
         for i in 0..division {
@@ -1356,7 +1354,7 @@ impl<'a, 'b> BranchBuilder<'a, 'b> {
             second_split_key_len,
             self.fixed_key_size,
         );
-        let mut page2 = self.mem.allocate(size, CachePriority::High)?;
+        let mut page2 = self.mem.allocate(size)?;
         let mut builder = RawBranchBuilder::new(
             &mut page2,
             self.keys.len() - division - 1,
diff --git a/src/tree_store/mod.rs b/src/tree_store/mod.rs
index 2fbf524b..7181dcd8 100644
--- a/src/tree_store/mod.rs
+++ b/src/tree_store/mod.rs
@@ -17,8 +17,8 @@ pub(crate) use btree_base::{
 pub(crate) use btree_iters::{AllPageNumbersBtreeIter, BtreeExtractIf, BtreeRangeIter};
 pub use page_store::{file_backend, InMemoryBackend, Savepoint};
 pub(crate) use page_store::{
-    CachePriority, Page, PageHint, PageNumber, SerializedSavepoint, TransactionalMemory,
-    FILE_FORMAT_VERSION2, MAX_PAIR_LENGTH, MAX_VALUE_LENGTH, PAGE_SIZE,
+    Page, PageHint, PageNumber, SerializedSavepoint, TransactionalMemory, FILE_FORMAT_VERSION2,
+    MAX_PAIR_LENGTH, MAX_VALUE_LENGTH, PAGE_SIZE,
 };
 pub(crate) use table_tree::{FreedPageList, FreedTableKey, TableTree, TableTreeMut};
 pub(crate) use table_tree_base::{InternalTableDefinition, TableType};
diff --git a/src/tree_store/page_store/cached_file.rs b/src/tree_store/page_store/cached_file.rs
index 81552771..afc48df9 100644
--- a/src/tree_store/page_store/cached_file.rs
+++ b/src/tree_store/page_store/cached_file.rs
@@ -1,7 +1,6 @@
 use crate::tree_store::page_store::base::PageHint;
-use crate::tree_store::LEAF;
+use crate::tree_store::page_store::lru_cache::LRUCache;
 use crate::{DatabaseError, Result, StorageBackend, StorageError};
-use std::collections::BTreeMap;
 use std::ops::{Index, IndexMut};
 use std::slice::SliceIndex;
 #[cfg(feature = "cache_metrics")]
@@ -9,28 +8,10 @@ use std::sync::atomic::AtomicU64;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex, RwLock};
 
-// Leaf pages are cached with low priority. Everything is cached with high priority
-#[derive(Clone, Copy)]
-pub(crate) enum CachePriority {
-    High,
-    Low,
-}
-
-impl CachePriority {
-    pub(crate) fn default_btree(data: &[u8]) -> CachePriority {
-        if data[0] == LEAF {
-            CachePriority::Low
-        } else {
-            CachePriority::High
-        }
-    }
-}
-
 pub(super) struct WritablePage {
-    buffer: Arc<Mutex<PrioritizedWriteCache>>,
+    buffer: Arc<Mutex<LRUWriteCache>>,
     offset: u64,
     data: Arc<[u8]>,
-    priority: CachePriority,
 }
 
 impl WritablePage {
@@ -48,7 +29,7 @@ impl Drop for WritablePage {
         self.buffer
             .lock()
             .unwrap()
-            .return_value(&self.offset, self.data.clone(), self.priority);
+            .return_value(&self.offset, self.data.clone());
     }
 }
 
@@ -67,84 +48,23 @@ impl<I: SliceIndex<[u8]>> IndexMut<I> for WritablePage {
 }
 
 #[derive(Default)]
-struct PrioritizedCache {
-    cache: BTreeMap<u64, Arc<[u8]>>,
-    low_pri_cache: BTreeMap<u64, Arc<[u8]>>,
+struct LRUWriteCache {
+    cache: LRUCache<Option<Arc<[u8]>>>,
 }
 
-impl PrioritizedCache {
+impl LRUWriteCache {
     fn new() -> Self {
         Self {
             cache: Default::default(),
-            low_pri_cache: Default::default(),
-        }
-    }
-
-    fn insert(&mut self, key: u64, value: Arc<[u8]>, priority: CachePriority) -> Option<Arc<[u8]>> {
-        if matches!(priority, CachePriority::Low) {
-            debug_assert!(!self.cache.contains_key(&key));
-            self.low_pri_cache.insert(key, value)
-        } else {
-            debug_assert!(!self.low_pri_cache.contains_key(&key));
-            self.cache.insert(key, value)
         }
     }
 
-    fn remove(&mut self, key: &u64) -> Option<Arc<[u8]>> {
-        let result = self.cache.remove(key);
-        if result.is_some() {
-            return result;
-        }
-        self.low_pri_cache.remove(key)
-    }
-
-    fn get(&self, key: &u64) -> Option<&Arc<[u8]>> {
-        let result = self.cache.get(key);
-        if result.is_some() {
-            return result;
-        }
-        self.low_pri_cache.get(key)
-    }
-
-    fn pop_lowest_priority(&mut self) -> Option<(u64, Arc<[u8]>)> {
-        let result = self.low_pri_cache.pop_first();
-        if result.is_some() {
-            return result;
-        }
-        self.cache.pop_first()
-    }
-}
-
-#[derive(Default)]
-struct PrioritizedWriteCache {
-    cache: BTreeMap<u64, Option<Arc<[u8]>>>,
-    low_pri_cache: BTreeMap<u64, Option<Arc<[u8]>>>,
-}
-
-impl PrioritizedWriteCache {
-    fn new() -> Self {
-        Self {
-            cache: Default::default(),
-            low_pri_cache: Default::default(),
-        }
-    }
-
-    fn insert(&mut self, key: u64, value: Arc<[u8]>, priority: CachePriority) {
-        if matches!(priority, CachePriority::Low) {
-            assert!(self.low_pri_cache.insert(key, Some(value)).is_none());
-            debug_assert!(!self.cache.contains_key(&key));
-        } else {
-            assert!(self.cache.insert(key, Some(value)).is_none());
-            debug_assert!(!self.low_pri_cache.contains_key(&key));
-        }
+    fn insert(&mut self, key: u64, value: Arc<[u8]>) {
+        assert!(self.cache.insert(key, Some(value)).is_none());
     }
 
     fn get(&self, key: &u64) -> Option<&Arc<[u8]>> {
-        let result = self.cache.get(key);
-        if result.is_some() {
-            return result.map(|x| x.as_ref().unwrap());
-        }
-        self.low_pri_cache.get(key).map(|x| x.as_ref().unwrap())
+        self.cache.get(key).map(|x| x.as_ref().unwrap())
     }
 
     fn remove(&mut self, key: &u64) -> Option<Arc<[u8]>> {
@@ -152,24 +72,11 @@ impl PrioritizedWriteCache {
             assert!(value.is_some());
             return value;
         }
-        if let Some(value) = self.low_pri_cache.remove(key) {
-            assert!(value.is_some());
-            return value;
-        }
         None
     }
 
-    fn return_value(&mut self, key: &u64, value: Arc<[u8]>, priority: CachePriority) {
-        if matches!(priority, CachePriority::Low) {
-            assert!(self
-                .low_pri_cache
-                .get_mut(key)
-                .unwrap()
-                .replace(value)
-                .is_none());
-        } else {
-            assert!(self.cache.get_mut(key).unwrap().replace(value).is_none());
-        }
+    fn return_value(&mut self, key: &u64, value: Arc<[u8]>) {
+        assert!(self.cache.get_mut(key).unwrap().replace(value).is_none());
     }
 
     fn take_value(&mut self, key: &u64) -> Option<Arc<[u8]>> {
@@ -177,38 +84,25 @@ impl PrioritizedWriteCache {
             let result = value.take().unwrap();
             return Some(result);
         }
-        if let Some(value) = self.low_pri_cache.get_mut(key) {
-            let result = value.take().unwrap();
-            return Some(result);
-        }
         None
     }
 
-    fn pop_lowest_priority(&mut self) -> Option<(u64, Arc<[u8]>, CachePriority)> {
-        for (k, v) in self.low_pri_cache.range(..) {
+    fn pop_lowest_priority(&mut self) -> Option<(u64, Arc<[u8]>)> {
+        let mut selected = None;
+        for (k, v) in self.cache.iter() {
             if v.is_some() {
-                let key = *k;
-                return self
-                    .low_pri_cache
-                    .remove(&key)
-                    .map(|x| (key, x.unwrap(), CachePriority::Low));
+                selected = Some(*k);
             }
         }
-        for (k, v) in self.cache.range(..) {
-            if v.is_some() {
-                let key = *k;
-                return self
-                    .cache
-                    .remove(&key)
-                    .map(|x| (key, x.unwrap(), CachePriority::High));
-            }
+        if let Some(key) = selected {
+            self.cache.remove(&key).map(|x| (key, x.unwrap()))
+        } else {
+            None
         }
-        None
     }
 
     fn clear(&mut self) {
         self.cache.clear();
-        self.low_pri_cache.clear();
     }
 }
 
@@ -291,9 +185,9 @@ pub(super) struct PagedCachedFile {
     reads_total: AtomicU64,
     #[cfg(feature = "cache_metrics")]
    reads_hits: AtomicU64,
-    read_cache: Box<[RwLock<PrioritizedCache>]>,
+    read_cache: Vec<RwLock<LRUCache<Arc<[u8]>>>>,
     // TODO: maybe move this cache to WriteTransaction?
-    write_buffer: Arc<Mutex<PrioritizedWriteCache>>,
+    write_buffer: Arc<Mutex<LRUWriteCache>>,
 }
 
 impl PagedCachedFile {
@@ -304,7 +198,7 @@ impl PagedCachedFile {
         max_write_buffer_bytes: usize,
     ) -> Result<Self, DatabaseError> {
         let read_cache = (0..Self::lock_stripes())
-            .map(|_| RwLock::new(PrioritizedCache::new()))
+            .map(|_| RwLock::new(LRUCache::new()))
            .collect();
 
         Ok(Self {
@@ -319,7 +213,7 @@ impl PagedCachedFile {
             #[cfg(feature = "cache_metrics")]
             reads_hits: Default::default(),
             read_cache,
-            write_buffer: Arc::new(Mutex::new(PrioritizedWriteCache::new())),
+            write_buffer: Arc::new(Mutex::new(LRUWriteCache::new())),
         })
     }
 
@@ -341,9 +235,6 @@ impl PagedCachedFile {
         for (offset, buffer) in write_buffer.cache.iter() {
             self.file.write(*offset, buffer.as_ref().unwrap())?;
         }
-        for (offset, buffer) in write_buffer.low_pri_cache.iter() {
-            self.file.write(*offset, buffer.as_ref().unwrap())?;
-        }
         for (offset, buffer) in write_buffer.cache.iter_mut() {
             let buffer = buffer.take().unwrap();
             let cache_size = self
                 .read_cache_bytes
                 .fetch_add(buffer.len(), Ordering::AcqRel);
 
             if cache_size + buffer.len() <= self.max_read_cache_bytes {
                 let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap();
                 let mut lock = self.read_cache[cache_slot].write().unwrap();
-                if let Some(replaced) = lock.insert(*offset, buffer, CachePriority::High) {
-                    // A race could cause us to replace an existing buffer
-                    self.read_cache_bytes
-                        .fetch_sub(replaced.len(), Ordering::AcqRel);
-                }
-            } else {
-                self.read_cache_bytes
-                    .fetch_sub(buffer.len(), Ordering::AcqRel);
-                break;
-            }
-        }
-        for (offset, buffer) in write_buffer.low_pri_cache.iter_mut() {
-            let buffer = buffer.take().unwrap();
-            let cache_size = self
-                .read_cache_bytes
-                .fetch_add(buffer.len(), Ordering::AcqRel);
-
-            if cache_size + buffer.len() <= self.max_read_cache_bytes {
-                let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap();
-                let mut lock = self.read_cache[cache_slot].write().unwrap();
-                if let Some(replaced) = lock.insert(*offset, buffer, CachePriority::Low) {
+                if let Some(replaced) = lock.insert(*offset, buffer) {
                     // A race could cause us to replace an existing buffer
                     self.read_cache_bytes
                         .fetch_sub(replaced.len(), Ordering::AcqRel);
@@ -416,13 +287,7 @@ impl PagedCachedFile {
 
     // Read with caching. Caller must not read overlapping ranges without first calling invalidate_cache().
     // Doing so will not cause UB, but is a logic error.
-    pub(super) fn read(
-        &self,
-        offset: u64,
-        len: usize,
-        hint: PageHint,
-        cache_policy: impl Fn(&[u8]) -> CachePriority,
-    ) -> Result<Arc<[u8]>> {
+    pub(super) fn read(&self, offset: u64, len: usize, hint: PageHint) -> Result<Arc<[u8]>> {
         debug_assert_eq!(0, offset % self.page_size);
         #[cfg(feature = "cache_metrics")]
         self.reads_total.fetch_add(1, Ordering::AcqRel);
@@ -451,9 +316,7 @@ impl PagedCachedFile {
         let buffer: Arc<[u8]> = self.read_direct(offset, len)?.into();
         let cache_size = self.read_cache_bytes.fetch_add(len, Ordering::AcqRel);
         let mut write_lock = self.read_cache[cache_slot].write().unwrap();
-        let cache_size = if let Some(replaced) =
-            write_lock.insert(offset, buffer.clone(), cache_policy(&buffer))
-        {
+        let cache_size = if let Some(replaced) = write_lock.insert(offset, buffer.clone()) {
             // A race could cause us to replace an existing buffer
             self.read_cache_bytes
                 .fetch_sub(replaced.len(), Ordering::AcqRel)
@@ -511,13 +374,7 @@ impl PagedCachedFile {
 
     // If overwrite is true, the page is initialized to zero
     // cache_policy takes the existing data as an argument and returns the priority. The priority should
    // be stable and not change after WritablePage is dropped
-    pub(super) fn write(
-        &self,
-        offset: u64,
-        len: usize,
-        overwrite: bool,
-        cache_policy: impl Fn(&[u8]) -> CachePriority,
-    ) -> Result<WritablePage> {
+    pub(super) fn write(&self, offset: u64, len: usize, overwrite: bool) -> Result<WritablePage> {
         assert_eq!(0, offset % self.page_size);
 
         let mut lock = self.write_buffer.lock().unwrap();
@@ -547,11 +404,11 @@ impl PagedCachedFile {
         if previous + len > self.max_write_buffer_bytes {
             let mut removed_bytes = 0;
             while removed_bytes < len {
-                if let Some((offset, buffer, removed_priority)) = lock.pop_lowest_priority() {
+                if let Some((offset, buffer)) = lock.pop_lowest_priority() {
                     let removed_len = buffer.len();
                     let result = self.file.write(offset, &buffer);
                     if result.is_err() {
-                        lock.insert(offset, buffer, removed_priority);
+                        lock.insert(offset, buffer);
                     }
                     result?;
                     self.write_buffer_bytes
@@ -569,16 +426,13 @@ impl PagedCachedFile {
             } else {
                 self.read_direct(offset, len)?.into()
             };
-            let priority = cache_policy(&result);
-            lock.insert(offset, result, priority);
+            lock.insert(offset, result);
             lock.take_value(&offset).unwrap()
         };
-        let priority = cache_policy(&data);
         Ok(WritablePage {
             buffer: self.write_buffer.clone(),
             offset,
             data,
-            priority,
         })
     }
 }
@@ -587,7 +441,7 @@ mod test {
     use crate::backends::InMemoryBackend;
     use crate::tree_store::page_store::cached_file::PagedCachedFile;
-    use crate::tree_store::{CachePriority, PageHint};
+    use crate::tree_store::PageHint;
     use crate::StorageBackend;
     use std::sync::atomic::Ordering;
     use std::sync::Arc;
@@ -605,9 +459,7 @@ mod test {
             let cached_file = cached_file.clone();
             std::thread::spawn(move || {
                 for _ in 0..1000 {
-                    cached_file
-                        .read(0, 128, PageHint::None, CachePriority::default_btree)
-                        .unwrap();
+                    cached_file.read(0, 128, PageHint::None).unwrap();
                     cached_file.invalidate_cache(0, 128);
                 }
             })
@@ -616,9 +468,7 @@ mod test {
             let cached_file = cached_file.clone();
             std::thread::spawn(move || {
                 for _ in 0..1000 {
-                    cached_file
-                        .read(0, 128, PageHint::None, CachePriority::default_btree)
-                        .unwrap();
+                    cached_file.read(0, 128, PageHint::None).unwrap();
                     cached_file.invalidate_cache(0, 128);
                 }
             })
diff --git a/src/tree_store/page_store/lru_cache.rs b/src/tree_store/page_store/lru_cache.rs
new file mode 100644
index 00000000..9e33ea0a
--- /dev/null
+++ b/src/tree_store/page_store/lru_cache.rs
@@ -0,0 +1,93 @@
+use std::collections::{HashMap, VecDeque};
+use std::sync::atomic::{AtomicBool, Ordering};
+
+#[derive(Default)]
+pub struct LRUCache<T> {
+    // AtomicBool is the second chance flag
+    cache: HashMap<u64, (T, AtomicBool)>,
+    lru_queue: VecDeque<u64>,
+}
+
+impl<T> LRUCache<T> {
+    pub(crate) fn new() -> Self {
+        Self {
+            cache: Default::default(),
+            lru_queue: Default::default(),
+        }
+    }
+
+    pub(crate) fn insert(&mut self, key: u64, value: T) -> Option<T> {
+        let result = self
+            .cache
+            .insert(key, (value, AtomicBool::new(false)))
+            .map(|(x, _)| x);
+        if result.is_none() {
+            self.lru_queue.push_back(key);
+        }
+        result
+    }
+
+    pub(crate) fn remove(&mut self, key: &u64) -> Option<T> {
+        if let Some((value, _)) = self.cache.remove(key) {
+            if self.lru_queue.len() > 2 * self.cache.len() {
+                // Cycle two elements of the LRU queue to ensure it doesn't grow without bound
+                for _ in 0..2 {
+                    if let Some(removed_key) = self.lru_queue.pop_front() {
+                        if let Some((_, second_chance)) = self.cache.get(&removed_key) {
+                            second_chance.store(false, Ordering::Release);
+                            self.lru_queue.push_back(removed_key);
+                        }
+                    }
+                }
+            }
+            Some(value)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn get(&self, key: &u64) -> Option<&T> {
+        if let Some((value, second_chance)) = self.cache.get(key) {
+            second_chance.store(true, Ordering::Release);
+            Some(value)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn get_mut(&mut self, key: &u64) -> Option<&mut T> {
+        if let Some((value, second_chance)) = self.cache.get_mut(key) {
+            second_chance.store(true, Ordering::Release);
+            Some(value)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn iter(&self) -> impl ExactSizeIterator<Item = (&u64, &T)> {
+        self.cache.iter().map(|(k, (v, _))| (k, v))
+    }
+
+    pub(crate) fn iter_mut(&mut self) -> impl ExactSizeIterator<Item = (&u64, &mut T)> {
+        self.cache.iter_mut().map(|(k, (v, _))| (k, v))
+    }
+
+    pub(crate) fn pop_lowest_priority(&mut self) -> Option<(u64, T)> {
+        while let Some(key) = self.lru_queue.pop_front() {
+            if let Some((value, second_chance)) = self.cache.remove(&key) {
+                if second_chance.load(Ordering::Acquire) {
+                    self.cache.insert(key, (value, AtomicBool::new(false)));
+                    self.lru_queue.push_back(key);
+                } else {
+                    return Some((key, value));
+                }
+            }
+        }
+        None
+    }
+
+    pub(crate) fn clear(&mut self) {
+        self.cache.clear();
+        self.lru_queue.clear();
+    }
+}
diff --git a/src/tree_store/page_store/mod.rs b/src/tree_store/page_store/mod.rs
index 331a2ca7..4d42547c 100644
--- a/src/tree_store/page_store/mod.rs
+++ b/src/tree_store/page_store/mod.rs
@@ -6,6 +6,7 @@ pub mod file_backend;
 mod header;
 mod in_memory_backend;
 mod layout;
+mod lru_cache;
 mod page_manager;
 mod region;
 mod savepoint;
@@ -21,6 +22,5 @@ pub(crate) use savepoint::SerializedSavepoint;
 
 pub(super) use base::{PageImpl, PageMut};
 pub(super) use buddy_allocator::BuddyAllocator;
-pub(crate) use cached_file::CachePriority;
 pub(super) use region::new_allocators;
 pub(super) use xxh3::hash128_with_seed;
diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs
index 35214bc6..e592cd26 100644
--- a/src/tree_store/page_store/page_manager.rs
+++ b/src/tree_store/page_store/page_manager.rs
@@ -2,7 +2,7 @@ use crate::transaction_tracker::TransactionId;
 use crate::tree_store::btree_base::{BtreeHeader, Checksum};
 use crate::tree_store::page_store::base::{PageHint, MAX_PAGE_INDEX};
 use crate::tree_store::page_store::buddy_allocator::BuddyAllocator;
-use crate::tree_store::page_store::cached_file::{CachePriority, PagedCachedFile};
+use crate::tree_store::page_store::cached_file::PagedCachedFile;
 use crate::tree_store::page_store::header::{DatabaseHeader, DB_HEADER_SIZE, MAGICNUMBER};
 use crate::tree_store::page_store::layout::DatabaseLayout;
 use crate::tree_store::page_store::region::{Allocators, RegionTracker};
@@ -186,7 +186,7 @@ impl TransactionalMemory {
         header.recovery_required = false;
         storage
-            .write(0, DB_HEADER_SIZE, true, |_| CachePriority::High)?
+            .write(0, DB_HEADER_SIZE, true)?
             .mem_mut()
             .copy_from_slice(&header.to_bytes(false, false));
         allocators.flush_to(tracker_page, layout, &storage)?;
@@ -195,7 +195,7 @@ impl TransactionalMemory {
         // Write the magic number only after the data structure is initialized and written to disk
         // to ensure that it's crash safe
         storage
-            .write(0, DB_HEADER_SIZE, true, |_| CachePriority::High)?
+            .write(0, DB_HEADER_SIZE, true)?
             .mem_mut()
             .copy_from_slice(&header.to_bytes(true, false));
         storage.flush(false)?;
@@ -231,7 +231,7 @@ impl TransactionalMemory {
         }
         assert!(!repair_info.invalid_magic_number);
         storage
-            .write(0, DB_HEADER_SIZE, true, |_| CachePriority::High)?
+            .write(0, DB_HEADER_SIZE, true)?
             .mem_mut()
             .copy_from_slice(&header.to_bytes(true, false));
         storage.flush(false)?;
@@ -310,7 +310,7 @@ impl TransactionalMemory {
             return Err(StorageError::Corrupted("Invalid magic number".to_string()).into());
         }
         self.storage
-            .write(0, DB_HEADER_SIZE, true, |_| CachePriority::High)?
+            .write(0, DB_HEADER_SIZE, true)?
             .mem_mut()
             .copy_from_slice(&header.to_bytes(true, false));
         self.storage.flush(false)?;
@@ -365,7 +365,7 @@ impl TransactionalMemory {
 
     fn write_header(&self, header: &DatabaseHeader, swap_primary: bool) -> Result {
         self.storage
-            .write(0, DB_HEADER_SIZE, true, |_| CachePriority::High)?
+            .write(0, DB_HEADER_SIZE, true)?
             .mem_mut()
             .copy_from_slice(&header.to_bytes(true, swap_primary));
 
@@ -383,9 +383,7 @@ impl TransactionalMemory {
             || tracker_page.page_size_bytes(self.page_size) < tracker_len as u64
         {
             drop(state);
-            let new_tracker_page = self
-                .allocate(tracker_len, CachePriority::High)?
-                .get_page_number();
+            let new_tracker_page = self.allocate(tracker_len)?.get_page_number();
 
             let mut state = self.state.lock().unwrap();
             state.header.set_region_tracker(new_tracker_page);
@@ -428,8 +426,7 @@ impl TransactionalMemory {
         let old_tracker_page = state.header.region_tracker();
         // allocate acquires this lock, so we need to drop it
         drop(state);
-        let new_page =
-            self.allocate_lowest(region_tracker_size.try_into().unwrap(), CachePriority::High)?;
+        let new_page = self.allocate_lowest(region_tracker_size.try_into().unwrap())?;
         if new_page.get_page_number().is_before(old_tracker_page) {
             let mut state = self.state.lock().unwrap();
             state.header.set_region_tracker(new_page.get_page_number());
@@ -648,9 +645,7 @@ impl TransactionalMemory {
             self.page_size,
         );
         let len: usize = (range.end - range.start).try_into().unwrap();
-        let mem = self
-            .storage
-            .read(range.start, len, hint, CachePriority::default_btree)?;
+        let mem = self.storage.read(range.start, len, hint)?;
 
         // We must not retrieve an immutable reference to a page which already has a mutable ref to it
         #[cfg(debug_assertions)]
@@ -695,12 +690,7 @@ impl TransactionalMemory {
         let len: usize = (address_range.end - address_range.start)
             .try_into()
             .unwrap();
-        let mem = self.storage.write(
-            address_range.start,
-            len,
-            false,
-            CachePriority::default_btree,
-        )?;
+        let mem = self.storage.write(address_range.start, len, false)?;
 
         #[cfg(debug_assertions)]
         {
@@ -809,12 +799,7 @@ impl TransactionalMemory {
         self.allocated_since_commit.lock().unwrap().contains(&page)
     }
 
-    pub(crate) fn allocate_helper(
-        &self,
-        allocation_size: usize,
-        lowest: bool,
-        priority: CachePriority,
-    ) -> Result<PageMut> {
+    pub(crate) fn allocate_helper(&self, allocation_size: usize, lowest: bool) -> Result<PageMut> {
         let required_pages = (allocation_size + self.get_page_size() - 1) / self.get_page_size();
         let required_order = ceil_log2(required_pages);
 
@@ -859,9 +844,7 @@ impl TransactionalMemory {
             .unwrap();
 
         #[allow(unused_mut)]
-        let mut mem = self
-            .storage
-            .write(address_range.start, len, true, |_| priority)?;
+        let mut mem = self.storage.write(address_range.start, len, true)?;
         debug_assert!(mem.mem().len() >= allocation_size);
 
         #[cfg(debug_assertions)]
@@ -983,20 +966,12 @@ impl TransactionalMemory {
         Ok(())
     }
 
-    pub(crate) fn allocate(
-        &self,
-        allocation_size: usize,
-        cache_priority: CachePriority,
-    ) -> Result<PageMut> {
-        self.allocate_helper(allocation_size, false, cache_priority)
+    pub(crate) fn allocate(&self, allocation_size: usize) -> Result<PageMut> {
+        self.allocate_helper(allocation_size, false)
     }
 
-    pub(crate) fn allocate_lowest(
-        &self,
-        allocation_size: usize,
-        cache_priority: CachePriority,
-    ) -> Result<PageMut> {
-        self.allocate_helper(allocation_size, true, cache_priority)
+    pub(crate) fn allocate_lowest(&self, allocation_size: usize) -> Result<PageMut> {
+        self.allocate_helper(allocation_size, true)
     }
 
     pub(crate) fn count_allocated_pages(&self) -> Result<u64> {
@@ -1055,7 +1030,7 @@ impl Drop for TransactionalMemory {
         if tracker_page_size < (tracker_len as u64) {
             drop(state);
             // Allocate a larger tracker page
-            if let Ok(tracker_page) = self.allocate(tracker_len, CachePriority::High) {
+            if let Ok(tracker_page) = self.allocate(tracker_len) {
                 state = self.state.lock().unwrap();
                 state
                     .header
diff --git a/src/tree_store/page_store/region.rs b/src/tree_store/page_store/region.rs
index 67993bac..27aae36d 100644
--- a/src/tree_store/page_store/region.rs
+++ b/src/tree_store/page_store/region.rs
@@ -1,6 +1,6 @@
 use crate::tree_store::page_store::bitmap::BtreeBitmap;
 use crate::tree_store::page_store::buddy_allocator::BuddyAllocator;
-use crate::tree_store::page_store::cached_file::{CachePriority, PagedCachedFile};
+use crate::tree_store::page_store::cached_file::PagedCachedFile;
 use crate::tree_store::page_store::header::DatabaseHeader;
 use crate::tree_store::page_store::layout::DatabaseLayout;
 use crate::tree_store::page_store::page_manager::{INITIAL_REGIONS, MAX_MAX_PAGE_ORDER};
@@ -225,7 +225,7 @@ impl Allocators {
                 page_size,
             );
             let len: usize = (range.end - range.start).try_into().unwrap();
-            storage.write(range.start, len, false, |_| CachePriority::High)?
+            storage.write(range.start, len, false)?
         };
         let tracker_bytes = self.region_tracker.to_vec();
         region_tracker_mem.mem_mut()[..tracker_bytes.len()].copy_from_slice(&tracker_bytes);
@@ -240,7 +240,7 @@ impl Allocators {
                 .try_into()
                 .unwrap();
 
-            let mut mem = storage.write(base, len, false, |_| CachePriority::High)?;
+            let mut mem = storage.write(base, len, false)?;
             RegionHeader::serialize(&self.region_allocators[i as usize], mem.mem_mut());
         }
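
The core of this change is the second-chance ("clock") eviction scheme in the new lru_cache.rs: every hit sets an AtomicBool flag, and the eviction scan re-queues flagged entries with the flag cleared instead of evicting them, so recently used pages survive one extra pass. Below is a minimal, self-contained Rust sketch of that idea, for illustration only; the name SecondChanceCache and its API are hypothetical and simplified (single-threaded, a plain bool instead of AtomicBool, no byte accounting or lock striping), not the types introduced in this diff.

use std::collections::{HashMap, VecDeque};

struct SecondChanceCache<T> {
    // The bool is the "second chance" flag: set on access, cleared by the eviction scan.
    entries: HashMap<u64, (T, bool)>,
    queue: VecDeque<u64>,
}

impl<T> SecondChanceCache<T> {
    fn new() -> Self {
        Self {
            entries: HashMap::new(),
            queue: VecDeque::new(),
        }
    }

    fn insert(&mut self, key: u64, value: T) -> Option<T> {
        let previous = self.entries.insert(key, (value, false)).map(|(v, _)| v);
        if previous.is_none() {
            // Only brand-new keys join the queue; replacing a value keeps its position.
            self.queue.push_back(key);
        }
        previous
    }

    fn get(&mut self, key: u64) -> Option<&T> {
        // A hit only flips the flag; the queue itself is untouched, so lookups stay O(1).
        match self.entries.get_mut(&key) {
            Some((value, used)) => {
                *used = true;
                Some(&*value)
            }
            None => None,
        }
    }

    fn evict(&mut self) -> Option<(u64, T)> {
        // Walk the queue in FIFO order: flagged entries are unflagged and re-queued,
        // and the first unflagged entry is evicted.
        while let Some(key) = self.queue.pop_front() {
            if let Some((value, used)) = self.entries.remove(&key) {
                if used {
                    self.entries.insert(key, (value, false));
                    self.queue.push_back(key);
                } else {
                    return Some((key, value));
                }
            }
            // Keys already removed from `entries` simply fall out of the queue here.
        }
        None
    }
}

fn main() {
    let mut cache = SecondChanceCache::new();
    cache.insert(1, "a");
    cache.insert(2, "b");
    cache.insert(3, "c");
    assert!(cache.get(1).is_some()); // give page 1 a second chance

    // Page 1 was touched, so page 2 (the oldest untouched entry) goes first.
    assert_eq!(cache.evict(), Some((2, "b")));
    assert_eq!(cache.evict(), Some((3, "c")));
    assert_eq!(cache.evict(), Some((1, "a")));
    println!("second-chance eviction order verified");
}

Recording recency as a flag rather than reordering the queue on every hit is what lets the real LRUCache::get take &self behind a shared RwLock stripe: a hit is just an atomic store, while queue maintenance is deferred to insert, remove, and pop_lowest_priority.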