Make the crate no_std + alloc (rebased) #48

Closed · wants to merge 6 commits
Cargo.toml (19 changes: 16 additions & 3 deletions)
@@ -21,11 +21,24 @@ maintenance = { status = "experimental" }

[features]
sanitize = ['crossbeam-epoch/sanitize']
std = ["crossbeam-epoch/std", "num_cpus", "parking_lot"]
default = ["std"]

[dependencies]
crossbeam-epoch = "0.9"
parking_lot = "0.10"
num_cpus = "1.12.0"
lock_api = "0.3.3"

[dependencies.parking_lot]
version = "0.10"
optional = true

[dependencies.num_cpus]
version = "1.12.0"
optional = true

[dependencies.crossbeam-epoch]
version = "0.9"
default-features = false
features = ["alloc"]

[dependencies.ahash]
version = "0.3.2"
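For orientation: with this feature split, `parking_lot` and `num_cpus` are only compiled in when the default `std` feature is enabled, while `lock_api` and a default-features-off `crossbeam-epoch` (with its `alloc` feature) remain available on every build. Inside the crate this kind of split usually surfaces as cfg-gated items along the following lines; this is a rough sketch, and these particular items are assumptions rather than lines from this PR:

```rust
// Sketch of how std-only optional dependencies are typically consumed.
// The items below are hypothetical; the real gating for the crate's
// re-exports appears in the src/lib.rs hunk further down.

// Only available when the `std` feature (and thus parking_lot) is enabled.
#[cfg(feature = "std")]
pub(crate) type DefaultRawMutex = parking_lot::RawMutex;

// num_cpus is likewise std-only; no_std builds must supply their own value.
#[cfg(feature = "std")]
pub(crate) fn available_parallelism() -> usize {
    num_cpus::get()
}
```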
azure-pipelines.yml (11 changes: 11 additions & 0 deletions)
@@ -29,6 +29,17 @@ jobs:
condition: ne(variables.CACHE_RESTORED, 'true')
- script: cargo deny check
displayName: cargo deny
- job: no_std
displayName: "Compile-check on no_std target"
pool:
vmImage: ubuntu-16.04
steps:
- template: install-rust.yml@templates
parameters:
targets:
- thumbv7m-none-eabi
- bash: cargo check --target thumbv7m-none-eabi --no-default-features
displayName: cargo check
- job: canary
displayName: "Warning screening"
dependsOn: deny
src/iter/mod.rs (40 changes: 29 additions & 11 deletions)
@@ -1,19 +1,25 @@
mod traverser;
pub(crate) use traverser::NodeIter;

use core::sync::atomic::Ordering;
use crossbeam_epoch::Guard;
use std::sync::atomic::Ordering;

/// An iterator over a map's entries.
///
/// See [`HashMap::iter`](crate::HashMap::iter) for details.
#[derive(Debug)]
pub struct Iter<'g, K, V> {
pub(crate) node_iter: NodeIter<'g, K, V>,
pub(crate) struct Iter<'g, K, V, L>
where
L: lock_api::RawMutex,
{
pub(crate) node_iter: NodeIter<'g, K, V, L>,
pub(crate) guard: &'g Guard,
}

impl<'g, K, V> Iterator for Iter<'g, K, V> {
impl<'g, K, V, L> Iterator for Iter<'g, K, V, L>
where
L: lock_api::RawMutex,
{
type Item = (&'g K, &'g V);
fn next(&mut self) -> Option<Self::Item> {
let node = self.node_iter.next()?;
@@ -28,11 +34,17 @@ impl<'g, K, V> Iterator for Iter<'g, K, V> {
///
/// See [`HashMap::keys`](crate::HashMap::keys) for details.
#[derive(Debug)]
pub struct Keys<'g, K, V> {
pub(crate) node_iter: NodeIter<'g, K, V>,
pub(crate) struct Keys<'g, K, V, L>
where
L: lock_api::RawMutex,
{
pub(crate) node_iter: NodeIter<'g, K, V, L>,
}

impl<'g, K, V> Iterator for Keys<'g, K, V> {
impl<'g, K, V, L> Iterator for Keys<'g, K, V, L>
where
L: lock_api::RawMutex,
{
type Item = &'g K;
fn next(&mut self) -> Option<Self::Item> {
let node = self.node_iter.next()?;
@@ -44,12 +56,18 @@ impl<'g, K, V> Iterator for Keys<'g, K, V> {
///
/// See [`HashMap::values`](crate::HashMap::values) for details.
#[derive(Debug)]
pub struct Values<'g, K, V> {
pub(crate) node_iter: NodeIter<'g, K, V>,
pub(crate) struct Values<'g, K, V, L>
where
L: lock_api::RawMutex,
{
pub(crate) node_iter: NodeIter<'g, K, V, L>,
pub(crate) guard: &'g Guard,
}

impl<'g, K, V> Iterator for Values<'g, K, V> {
impl<'g, K, V, L> Iterator for Values<'g, K, V, L>
where
L: lock_api::RawMutex,
{
type Item = &'g V;
fn next(&mut self) -> Option<Self::Item> {
let node = self.node_iter.next()?;
@@ -63,9 +81,9 @@ impl<'g, K, V> Iterator for Values<'g, K, V> {
#[cfg(test)]
mod tests {
use crate::HashMap;
use core::iter::FromIterator;
use crossbeam_epoch as epoch;
use std::collections::HashSet;
use std::iter::FromIterator;

#[test]
fn iter() {
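The pattern in this file is mechanical: each iterator type gains an `L: lock_api::RawMutex` parameter only because the node type it wraps now carries one. A minimal standalone sketch of the same pattern, using illustrative stand-in types rather than the crate's real ones:

```rust
// Minimal sketch of threading a lock_api::RawMutex parameter through a
// wrapper type, mirroring what Iter/Keys/Values do above. Bin and BinIter
// are stand-ins, not types from this crate.
use lock_api::{Mutex, RawMutex};

struct Bin<T, L: RawMutex> {
    value: T,
    // Per-bin lock; the caller decides which RawMutex implementation backs it.
    lock: Mutex<L, ()>,
}

struct BinIter<'a, T, L: RawMutex> {
    bins: core::slice::Iter<'a, Bin<T, L>>,
}

impl<'a, T, L: RawMutex> Iterator for BinIter<'a, T, L> {
    type Item = &'a T;
    fn next(&mut self) -> Option<Self::Item> {
        // The bound never gets in the way of iteration; it only constrains
        // which lock type the bins were built with.
        self.bins.next().map(|bin| &bin.value)
    }
}

// With the `std` feature, parking_lot's raw mutex is the obvious default:
// type StdBinIter<'a, T> = BinIter<'a, T, parking_lot::RawMutex>;
```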
src/iter/traverser.rs (57 changes: 38 additions & 19 deletions)
@@ -1,18 +1,26 @@
#[cfg(not(feature = "std"))]
extern crate alloc;

use crate::node::{BinEntry, Node};
use crate::raw::Table;
#[cfg(not(feature = "std"))]
use alloc::boxed::Box;
use core::sync::atomic::Ordering;
use crossbeam_epoch::{Guard, Shared};
use std::sync::atomic::Ordering;

#[derive(Debug)]
pub(crate) struct NodeIter<'g, K, V> {
pub(crate) struct NodeIter<'g, K, V, L>
where
L: lock_api::RawMutex,
{
/// Current table; update if resized
table: Option<&'g Table<K, V>>,
table: Option<&'g Table<K, V, L>>,

stack: Option<Box<TableStack<'g, K, V>>>,
spare: Option<Box<TableStack<'g, K, V>>>,
stack: Option<Box<TableStack<'g, K, V, L>>>,
spare: Option<Box<TableStack<'g, K, V, L>>>,

/// The last bin entry iterated over
prev: Option<&'g Node<K, V>>,
prev: Option<&'g Node<K, V, L>>,

/// Index of bin to use next
index: usize,
@@ -29,8 +37,11 @@ pub(crate) struct NodeIter<'g, K, V> {
guard: &'g Guard,
}

impl<'g, K, V> NodeIter<'g, K, V> {
pub(crate) fn new(table: Shared<'g, Table<K, V>>, guard: &'g Guard) -> Self {
impl<'g, K, V, L> NodeIter<'g, K, V, L>
where
L: lock_api::RawMutex,
{
pub(crate) fn new(table: Shared<'g, Table<K, V, L>>, guard: &'g Guard) -> Self {
let (table, len) = if table.is_null() {
(None, 0)
} else {
@@ -53,7 +64,7 @@ impl<'g, K, V> NodeIter<'g, K, V> {
}
}

fn push_state(&mut self, t: &'g Table<K, V>, i: usize, n: usize) {
fn push_state(&mut self, t: &'g Table<K, V, L>, i: usize, n: usize) {
let mut s = self.spare.take();
if let Some(ref mut s) = s {
self.spare = s.next.take();
@@ -109,8 +120,11 @@ impl<'g, K, V> NodeIter<'g, K, V> {
}
}

impl<'g, K, V> Iterator for NodeIter<'g, K, V> {
type Item = &'g Node<K, V>;
impl<'g, K, V, L> Iterator for NodeIter<'g, K, V, L>
where
L: lock_api::RawMutex,
{
type Item = &'g Node<K, V, L>;
fn next(&mut self) -> Option<Self::Item> {
let mut e = None;
if let Some(prev) = self.prev {
@@ -177,30 +191,35 @@ impl<'g, K, V> Iterator for NodeIter<'g, K, V> {
}

#[derive(Debug)]
struct TableStack<'g, K, V> {
struct TableStack<'g, K, V, L>
where
L: lock_api::RawMutex,
{
length: usize,
index: usize,
table: &'g Table<K, V>,
next: Option<Box<TableStack<'g, K, V>>>,
table: &'g Table<K, V, L>,
next: Option<Box<TableStack<'g, K, V, L>>>,
}

#[cfg(test)]
mod tests {
use super::*;
use crate::raw::Table;
use crossbeam_epoch::{self as epoch, Atomic, Owned};
use parking_lot::Mutex;
use lock_api::Mutex;

type L = parking_lot::RawMutex;

#[test]
fn iter_new() {
let guard = epoch::pin();
let iter = NodeIter::<usize, usize>::new(Shared::null(), &guard);
let iter = NodeIter::<usize, usize, L>::new(Shared::null(), &guard);
assert_eq!(iter.count(), 0);
}

#[test]
fn iter_empty() {
let table = Owned::new(Table::<usize, usize>::new(16));
let table = Owned::new(Table::<usize, usize, L>::new(16));
let guard = epoch::pin();
let table = table.into_shared(&guard);
let iter = NodeIter::new(table, &guard);
@@ -219,7 +238,7 @@ mod tests {
key: 0usize,
value: Atomic::new(0usize),
next: Atomic::null(),
lock: Mutex::new(()),
lock: Mutex::<L, _>::new(()),
}));

let table = Owned::new(Table::from(bins));
@@ -253,7 +272,7 @@

// construct the forwarded-from table
let mut bins = vec![Shared::null(); 16];
let table = Table::<usize, usize>::new(bins.len());
let table = Table::<usize, usize, L>::new(bins.len());
for bin in &mut bins[8..] {
// this also sets table.next_table to deep_table
*bin = table.get_moved(deep_table, &guard);
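Beyond the added lock parameter, the only substantive change in this file is where `Box` comes from: on no_std builds it must be imported from `alloc`. A condensed sketch of that arrangement at a crate root; the helper function below is illustrative, not code from this PR:

```rust
// Crate-root sketch of the no_std + alloc setup used by this PR: the crate
// opts out of std when the feature is disabled and pulls heap types from
// `alloc` instead of the std prelude.
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
extern crate alloc;

#[cfg(not(feature = "std"))]
use alloc::boxed::Box;

// Compiles identically with or without `std`: `Box` resolves to the same
// type either way, only its import path differs.
pub fn leak_forever<T>(value: T) -> &'static mut T {
    Box::leak(Box::new(value))
}
```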
src/lib.rs (38 changes: 23 additions & 15 deletions)
@@ -29,20 +29,20 @@
//!
//! # Consistency
//!
//! Retrieval operations (including [`get`](HashMap::get)) generally do not block, so may
//! overlap with update operations (including [`insert`](HashMap::insert)). Retrievals
//! Retrieval operations (including [`get`](ConcurrentHashMap::get)) generally do not block, so may
//! overlap with update operations (including [`insert`](ConcurrentHashMap::insert)). Retrievals
//! reflect the results of the most recently *completed* update operations holding upon their
//! onset. (More formally, an update operation for a given key bears a _happens-before_ relation
//! with any successful retrieval for that key reporting the updated value.)
//!
//! Operations that inspect the map as a whole, rather than a single key, operate on a snapshot of
//! the underlying table. For example, iterators return elements reflecting the state of the hash
//! table at some point at or since the creation of the iterator. Aggregate status methods like
//! [`len`](HashMap::len) are typically useful only when a map is not undergoing concurrent
//! updates in other threads. Otherwise the results of these methods reflect transient states that
//! may be adequate for monitoring or estimation purposes, but not for program control.
//! Similarly, [`Clone`](std::clone::Clone) may not produce a "perfect" clone if the underlying
//! map is being concurrently modified.
//! [`len`](ConcurrentHashMap::len) are typically useful only when a map is not undergoing
//! concurrent updates in other threads. Otherwise the results of these methods reflect transient
//! states that may be adequate for monitoring or estimation purposes, but not for program control.
//! Similarly, [`Clone`](std::clone::Clone) may not produce a "perfect" clone if the underlying map
//! is being concurrently modified.
//!
//! # Resizing behavior
//!
@@ -53,7 +53,7 @@
//! and removed, but overall, this maintains a commonly accepted time/space tradeoff for hash
//! tables. However, resizing this or any other kind of hash table may be a relatively slow
//! operation. When possible, it is a good idea to provide a size estimate by using the
//! [`with_capacity`](HashMap::with_capacity) constructor. Note that using many keys with
//! [`with_capacity`](ConcurrentHashMap::with_capacity) constructor. Note that using many keys with
//! exactly the same [`Hash`](std::hash::Hash) value is a sure way to slow down performance of any
//! hash table. /* TODO: dynamic load factor */
//!
@@ -195,13 +195,14 @@
//! more efficient operation than if everything had to be atomically reference-counted.
//!
//! [`crossbeam::epoch`]: https://docs.rs/crossbeam/0.7/crossbeam/epoch/index.html
#![deny(
missing_docs,
missing_debug_implementations,
unreachable_pub,
intra_doc_link_resolution_failure
)]
#![deny(missing_docs, unreachable_pub, intra_doc_link_resolution_failure)]
#![warn(rust_2018_idioms)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "std", deny(missing_debug_implementations))]

#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;

mod map;
mod map_ref;
@@ -211,13 +212,20 @@ mod raw;
/// Iterator types.
pub mod iter;

pub use map::ConcurrentHashMap;
pub use map_ref::ConcurrentHashMapRef;

#[cfg(feature = "std")]
pub use map::HashMap;
#[cfg(feature = "std")]
pub use map_ref::HashMapRef;

/// Default hasher for [`HashMap`].
pub type DefaultHashBuilder = ahash::RandomState;

/// Types needed to safely access shared data concurrently.
pub mod epoch {
pub use crossbeam_epoch::{pin, Guard};
#[cfg(feature = "std")]
pub use crossbeam_epoch::pin;
pub use crossbeam_epoch::Guard;
}
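For completeness, here is a hedged sense of how the two re-export paths would be used. The std-gated `HashMap` path keeps the existing guard-based API (the exact `new`/`insert`/`get` signatures are assumed unchanged by this PR, since map.rs is not shown here), while the always-available `ConcurrentHashMap` is the same map with the lock type spelled out.

```rust
// Usage sketch, written as if it were a doc test inside the crate.
// Assumes the pre-existing API (new / insert / get taking a &Guard) survives
// this PR unchanged; only the type names come from the re-exports above.
#[cfg(feature = "std")]
fn std_usage() {
    use crate::{epoch, HashMap};

    let map = HashMap::new();
    let guard = epoch::pin();
    map.insert(1, "one", &guard);
    assert_eq!(map.get(&1, &guard), Some(&"one"));
}

// On no_std, the convenience alias and `epoch::pin` are gone, so callers name
// the lock explicitly, e.g. ConcurrentHashMap<K, V, DefaultHashBuilder, L>
// for any L: lock_api::RawMutex (generic order assumed), and obtain a Guard
// from whatever crossbeam-epoch collector their environment provides.
```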