
Prepare library for no_std
This is essentially 7b32a16 rebased on top of #47.
jonhoo committed Jan 30, 2020
1 parent 3bf1f8b commit c044a06
Showing 8 changed files with 86 additions and 31 deletions.
13 changes: 11 additions & 2 deletions Cargo.toml
@@ -21,11 +21,20 @@ maintenance = { status = "experimental" }

[features]
sanitize = ['crossbeam-epoch/sanitize']
+ std = ["crossbeam-epoch/std", "num_cpus"]
+ default = ["std"]

[dependencies]
- crossbeam-epoch = "0.9"
parking_lot = "0.10"
- num_cpus = "1.12.0"

+ [dependencies.num_cpus]
+ version = "1.12.0"
+ optional = true
+
+ [dependencies.crossbeam-epoch]
+ version = "0.9"
+ default-features = false
+ features = ["alloc"]
+
[dependencies.ahash]
version = "0.3.2"
11 changes: 11 additions & 0 deletions azure-pipelines.yml
@@ -29,6 +29,17 @@ jobs:
condition: ne(variables.CACHE_RESTORED, 'true')
- script: cargo deny check
displayName: cargo deny
+ - job: no_std
+   displayName: "Compile-check on no_std target"
+   pool:
+     vmImage: ubuntu-16.04
+   steps:
+     - template: install-rust.yml@templates
+       parameters:
+         targets:
+           - thumbv7m-none-eabi
+     - bash: cargo check --target thumbv7m-none-eabi --no-default-features
+       displayName: cargo check
- job: miri
displayName: "Run miri on test suite"
dependsOn: deny
4 changes: 2 additions & 2 deletions src/iter/iter.rs
@@ -1,6 +1,6 @@
use super::NodeIter;
+ use core::sync::atomic::Ordering;
use crossbeam_epoch::Guard;
- use std::sync::atomic::Ordering;

/// An iterator over a map's entries.
///
@@ -61,9 +61,9 @@ impl<'g, K, V> Iterator for Values<'g, K, V> {
#[cfg(test)]
mod tests {
use crate::HashMap;
+ use core::iter::FromIterator;
use crossbeam_epoch as epoch;
use std::collections::HashSet;
- use std::iter::FromIterator;

#[test]
fn iter() {
7 changes: 6 additions & 1 deletion src/iter/traverser.rs
@@ -1,7 +1,12 @@
+ #[cfg(not(feature = "std"))]
+ extern crate alloc;
+
use crate::node::{BinEntry, Node};
use crate::raw::Table;
+ #[cfg(not(feature = "std"))]
+ use alloc::boxed::Box;
+ use core::sync::atomic::Ordering;
use crossbeam_epoch::{Guard, Shared};
- use std::sync::atomic::Ordering;

#[derive(Debug)]
pub(crate) struct NodeIter<'g, K, V> {
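The `extern crate alloc` and `alloc::boxed::Box` lines above are how heap types are reached once `std` is gone: in a `no_std` build, `Box` and `Vec` live in the `alloc` crate rather than being re-exported by `std`. A minimal standalone sketch of that pattern (illustrative only, not taken from this commit):

#![no_std]

extern crate alloc;

use alloc::boxed::Box;
use alloc::vec::Vec;

// Build an empty boxed slice without touching `std`; the same
// `Vec`-to-`Box<[T]>` trick shows up again in the src/raw/mod.rs hunk below.
pub fn empty_bins<T>() -> Box<[T]> {
    Vec::new().into_boxed_slice()
}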
17 changes: 10 additions & 7 deletions src/lib.rs
@@ -193,13 +193,14 @@
//! more efficient operation than if everything had to be atomically reference-counted.
//!
//! [`crossbeam::epoch`]: https://docs.rs/crossbeam/0.7/crossbeam/epoch/index.html
- #![deny(
- missing_docs,
- missing_debug_implementations,
- unreachable_pub,
- intra_doc_link_resolution_failure
- )]
+ #![deny(missing_docs, unreachable_pub, intra_doc_link_resolution_failure)]
#![warn(rust_2018_idioms)]
+ #![cfg_attr(not(feature = "std"), no_std)]
+ #![cfg_attr(feature = "std", deny(missing_debug_implementations))]

+ #[cfg(not(feature = "std"))]
+ #[macro_use]
+ extern crate alloc;
+
mod map;
mod node;
@@ -215,5 +216,7 @@ pub type DefaultHashBuilder = ahash::RandomState;

/// Types needed to safely access shared data concurrently.
pub mod epoch {
- pub use crossbeam_epoch::{pin, Guard};
+ #[cfg(feature = "std")]
+ pub use crossbeam_epoch::pin;
+ pub use crossbeam_epoch::Guard;
}
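Note that only `pin` gains a `feature = "std"` gate: crossbeam-epoch's default, thread-local collector (which `pin()` relies on) needs `std`, while `Guard` itself works with just `alloc`. In a `no_std` build a guard would instead come from an explicitly managed `Collector`. A rough sketch of that idea, assuming crossbeam-epoch is built with only its `alloc` feature as in the Cargo.toml change above; this is not code from this crate:

use crossbeam_epoch::{Collector, Guard};

// Every thread touching the shared data must agree on one collector; real
// code would register a `LocalHandle` once per thread and reuse it instead
// of registering on every call as done here for brevity.
fn pin_guard(collector: &Collector) -> Guard {
    let handle = collector.register();
    handle.pin()
}

With the `std` feature enabled, callers can keep using the re-exported `pin()` exactly as before.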
48 changes: 34 additions & 14 deletions src/map.rs
@@ -1,15 +1,15 @@
use crate::iter::*;
use crate::node::*;
use crate::raw::*;
+ use core::borrow::Borrow;
+ use core::hash::{BuildHasher, Hash, Hasher};
+ use core::iter::FromIterator;
+ use core::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
use crossbeam_epoch::{self as epoch, Atomic, Guard, Owned, Shared};
- use std::borrow::Borrow;
+ #[cfg(feature = "std")]
use std::fmt::{self, Debug, Formatter};
- use std::hash::{BuildHasher, Hash, Hasher};
- use std::iter::FromIterator;
- use std::sync::{
- atomic::{AtomicIsize, AtomicUsize, Ordering},
- Once,
- };
+ #[cfg(feature = "std")]
+ use std::sync::Once;

const ISIZE_BITS: usize = core::mem::size_of::<isize>() * 8;

@@ -43,8 +43,10 @@ const MAX_RESIZERS: isize = (1 << (ISIZE_BITS - RESIZE_STAMP_BITS)) - 1;
/// The bit shift for recording size stamp in `size_ctl`.
const RESIZE_STAMP_SHIFT: usize = ISIZE_BITS - RESIZE_STAMP_BITS;

+ #[cfg(feature = "std")]
static NCPU_INITIALIZER: Once = Once::new();
- static NCPU: AtomicUsize = AtomicUsize::new(0);
+ #[cfg(feature = "std")]
+ static NCPU: AtomicUsize = AtomicUsize::new(1);

macro_rules! load_factor {
($n: expr) => {
@@ -307,6 +309,15 @@ where
// try to allocate the table
let mut sc = self.size_ctl.load(Ordering::SeqCst);
if sc < 0 {
+ #[cfg(not(feature = "std"))]
+ // for there to be a race, there must be another thread running
+ // concurrently with us. That thread cannot be blocked on us,
+ // since we are not in any mutually-exclusive section. So our
+ // goal is just to not waste cycles and give it some time to
+ // complete. It is not a requirement that we fully yield.
+ core::sync::atomic::spin_loop_hint();
+
+ #[cfg(feature = "std")]
// we lost the initialization race; just spin
std::thread::yield_now();
continue;
@@ -580,7 +591,7 @@ where
fn add_count(&self, n: isize, resize_hint: Option<usize>, guard: &Guard) {
// TODO: implement the Java CounterCell business here

- use std::cmp;
+ use core::cmp;
let mut count = match n.cmp(&0) {
cmp::Ordering::Greater => {
let n = n as usize;
@@ -669,7 +680,7 @@ where
let ncpu = num_cpus();

let stride = if ncpu > 1 { (n >> 3) / ncpu } else { n };
- let stride = std::cmp::max(stride as isize, MIN_TRANSFER_STRIDE);
+ let stride = core::cmp::max(stride as isize, MIN_TRANSFER_STRIDE);

if next_table.is_null() {
// we are initiating a resize
@@ -954,7 +965,7 @@ where
// TODO: find out if this is neccessary
let size = size + (size >> 1) + 1;

- std::cmp::min(MAXIMUM_CAPACITY, size.next_power_of_two())
+ core::cmp::min(MAXIMUM_CAPACITY, size.next_power_of_two())
} as isize;

loop {
@@ -1341,6 +1352,7 @@ where
}
}

+ #[cfg(feature = "std")]
impl<K, V, S> PartialEq for HashMap<K, V, S>
where
K: Sync + Send + Clone + Eq + Hash,
@@ -1358,6 +1370,7 @@ where
}
}

+ #[cfg(feature = "std")]
impl<K, V, S> Eq for HashMap<K, V, S>
where
K: Sync + Send + Clone + Eq + Hash,
@@ -1366,6 +1379,7 @@ where
{
}

+ #[cfg(feature = "std")]
impl<K, V, S> fmt::Debug for HashMap<K, V, S>
where
K: Sync + Send + Clone + Debug + Eq + Hash,
@@ -1402,6 +1416,7 @@ impl<K, V, S> Drop for HashMap<K, V, S> {
}
}

+ #[cfg(feature = "std")]
impl<K, V, S> Extend<(K, V)> for &HashMap<K, V, S>
where
K: Sync + Send + Clone + Hash + Eq,
@@ -1429,6 +1444,7 @@ where
}
}

+ #[cfg(feature = "std")]
impl<'a, K, V, S> Extend<(&'a K, &'a V)> for &HashMap<K, V, S>
where
K: Sync + Send + Copy + Hash + Eq,
@@ -1491,6 +1507,7 @@ where
}
}

+ #[cfg(feature = "std")]
impl<K, V, S> Clone for HashMap<K, V, S>
where
K: Sync + Send + Clone + Hash + Eq,
@@ -1509,16 +1526,19 @@ where
}
}

- #[cfg(not(miri))]
#[inline]
- /// Returns the number of physical CPUs in the machine (_O(1)_).
+ #[cfg(all(not(miri), feature = "std"))]
+ /// Returns the number of physical CPUs in the machine.
+ /// Returns `1` in `no_std` environment.
fn num_cpus() -> usize {
NCPU_INITIALIZER.call_once(|| NCPU.store(num_cpus::get_physical(), Ordering::Relaxed));
NCPU.load(Ordering::Relaxed)
}

- #[cfg(miri)]
#[inline]
+ #[cfg(any(miri, not(feature = "std")))]
+ /// Returns the number of physical CPUs in the machine.
+ /// Returns `1` in `no_std` environment.
const fn num_cpus() -> usize {
1
}
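The only behavioural change in this file is the wait in the `@@ -307,6 +309,15 @@` hunk above: the thread that loses the table-initialization race yields to the OS scheduler when `std` is available, and can only issue a spin-loop hint when it is not. The same split, factored into a standalone helper purely for illustration (the helper name is made up; this is not code from the commit):

#[inline]
fn lost_race_backoff() {
    // With an OS underneath, give the winning thread a chance to run.
    #[cfg(feature = "std")]
    std::thread::yield_now();

    // On a bare-metal target there is nothing to yield to, so only tell the
    // CPU that this is a busy-wait.
    #[cfg(not(feature = "std"))]
    core::sync::atomic::spin_loop_hint();
}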
4 changes: 2 additions & 2 deletions src/node.rs
@@ -1,8 +1,8 @@
use crate::raw::Table;
+ use core::borrow::Borrow;
+ use core::sync::atomic::Ordering;
use crossbeam_epoch::{Atomic, Guard, Shared};
use parking_lot::Mutex;
- use std::borrow::Borrow;
- use std::sync::atomic::Ordering;

/// Entry in a bin.
///
13 changes: 10 additions & 3 deletions src/raw/mod.rs
@@ -1,7 +1,11 @@
use crate::node::*;
+ #[cfg(not(feature = "std"))]
+ use alloc::boxed::Box;
+ #[cfg(not(feature = "std"))]
+ use alloc::vec::Vec;
+ use core::fmt::Debug;
+ use core::sync::atomic::Ordering;
use crossbeam_epoch::{Atomic, Guard, Owned, Shared};
- use std::fmt::Debug;
- use std::sync::atomic::Ordering;

#[derive(Debug)]
pub(crate) struct Table<K, V> {
@@ -35,7 +39,10 @@ impl<K, V> Table<K, V> {
// anything in the map.
let guard = unsafe { crossbeam_epoch::unprotected() };

- for bin in Vec::from(std::mem::replace(&mut self.bins, vec![].into_boxed_slice())) {
+ for bin in Vec::from(core::mem::replace(
+ &mut self.bins,
+ vec![].into_boxed_slice(),
+ )) {
if bin.load(Ordering::SeqCst, guard).is_null() {
// bin was never used
continue;

