Commit 8cabd8c

perf: Use enum_dispatch over dynamic dispatch

XAMPPRocky committed Oct 14, 2024
1 parent f564b28 commit 8cabd8c

Showing 24 changed files with 68 additions and 643 deletions.
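For context on the headline change: enum_dispatch generates an enum over the concrete implementors of a trait and forwards trait calls through a match on the variant, so calls resolve statically instead of through a `Box<dyn Trait>` vtable. Below is a minimal sketch of the pattern with illustrative names; it is not Quilkin's actual filter trait or types.

```rust
// A minimal sketch of the enum_dispatch pattern; `Filter`, `Compress`,
// and `TokenRouter` are illustrative stand-ins, not Quilkin's real types.
use enum_dispatch::enum_dispatch;

// Annotating the trait lets the macro generate forwarding impls.
#[enum_dispatch]
trait Filter {
    fn filter(&self, packet: &mut Vec<u8>);
}

struct Compress;
impl Filter for Compress {
    fn filter(&self, packet: &mut Vec<u8>) {
        packet.shrink_to_fit(); // stand-in for real compression
    }
}

struct TokenRouter;
impl Filter for TokenRouter {
    fn filter(&self, _packet: &mut Vec<u8>) {}
}

// The macro expands each bare variant into `Variant(Type)`, implements
// `Filter` for the enum by matching on the discriminant, and derives
// `From<Type>` for each variant, replacing `Box<dyn Filter>`.
#[enum_dispatch(Filter)]
enum FilterKind {
    Compress,
    TokenRouter,
}

fn main() {
    let chain: Vec<FilterKind> = vec![Compress.into(), TokenRouter.into()];
    let mut packet = b"hello".to_vec();
    for f in &chain {
        f.filter(&mut packet); // dispatched via match, no vtable lookup
    }
}
```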
13 changes: 13 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -149,6 +149,7 @@ strum_macros = "0.26"
 cfg-if = "1.0.0"
 libflate = "2.0.0"
 form_urlencoded = "1.2.1"
+enum_dispatch = "0.3.13"
 gxhash = "3.4.1"
 papaya = { version = "0.1.3", features = ["serde"] }
 seize = "0.4.5"
11 changes: 3 additions & 8 deletions benches/cluster_map.rs
@@ -17,10 +17,10 @@ mod serde {
     fn serialize_to_protobuf(cm: &ClusterMap) -> Vec<Any> {
         let mut resources = Vec::new();
 
-        for cluster in cm.iter() {
+        for (key, cluster) in cm.pin().iter() {
             resources.push(
                 Resource::Cluster(Cluster {
-                    locality: cluster.key().clone().map(From::from),
+                    locality: key.clone().map(From::from),
                     endpoints: cluster
                         .endpoints
                         .iter()
@@ -110,12 +110,7 @@ mod ops {
     use shared::{gen_cluster_map, GenCluster};
 
     fn compute_hash<const S: u64>(gc: &GenCluster) -> usize {
-        let mut total_endpoints = 0;
-
-        for kv in gc.cm.iter() {
-            total_endpoints += kv.endpoints.len();
-        }
-
+        let total_endpoints = gc.cm.pin().values().map(|v| v.endpoints.len()).sum();
         assert_eq!(total_endpoints, gc.total_endpoints);
         total_endpoints
     }
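The hunks above move from a guarded-entry iteration API (`kv.key()` / `kv.value()`, DashMap-style) to papaya's pinned reads, where `pin()` enters an epoch guard and the returned reference exposes the read API. A small self-contained sketch of that pattern, with made-up keys and values rather than Quilkin's cluster types:

```rust
// Illustrative sketch of papaya's pinned-read pattern, assuming the
// papaya crate from Cargo.toml above; contents here are made up.
fn main() {
    let map: papaya::HashMap<String, Vec<u32>> = papaya::HashMap::new();
    map.pin().insert("cluster-a".into(), vec![1, 2, 3]);
    map.pin().insert("cluster-b".into(), vec![4, 5]);

    // `pin()` returns a guard-backed reference; `iter()` yields
    // `(&K, &V)` pairs and `values()` just the values, as in the diffs.
    let guard = map.pin();
    let total: usize = guard.values().map(|v| v.len()).sum();
    for (key, endpoints) in guard.iter() {
        println!("{key}: {} endpoints", endpoints.len());
    }
    assert_eq!(total, 5);
}
```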
2 changes: 1 addition & 1 deletion benches/shared.rs
@@ -676,7 +676,7 @@ pub fn gen_cluster_map<const S: u64>(token_kind: TokenKind) -> GenCluster {
 
     // Now actually insert the endpoints, now that the order of keys is established,
     // annoying, but note we split out iteration versus insertion, otherwise we deadlock
-    let keys: Vec<_> = cm.iter().map(|kv| kv.key().clone()).collect();
+    let keys: Vec<_> = cm.pin().iter().map(|(key, _)| key.clone()).collect();
     let mut sets = std::collections::BTreeMap::new();
 
     let mut token_generator = match token_kind {
4 changes: 2 additions & 2 deletions benches/token_router.rs
@@ -14,8 +14,8 @@ fn token_router(b: Bencher, token_kind: &str) {
     let cm = std::sync::Arc::new(gc.cm);
 
     // Calculate the amount of bytes for all the tokens
-    for eps in cm.iter() {
-        for ep in &eps.value().endpoints {
+    for eps in cm.pin().values() {
+        for ep in &eps.endpoints {
             for tok in &ep.metadata.known.tokens {
                 tokens.push(tok.clone());
             }
9 changes: 1 addition & 8 deletions build/Makefile
@@ -77,7 +77,7 @@ version:
 	@echo $(package_version)
 
 # Run all tests
-test: ensure-build-image test-quilkin test-examples test-docs
+test: ensure-build-image test-quilkin test-docs
 
 # In CI with split jobs that both fetch they will fail if run in parallel since
 # cargo will be fighting with itself for some the same host directory that is
@@ -103,13 +103,6 @@ test-quilkin: ensure-build-image
 		--network=host \
 		-e RUST_BACKTRACE=1 --entrypoint=cargo $(BUILD_IMAGE_TAG) test -p quilkin -p qt
 
-# Run tests against the examples
-test-examples: ensure-build-image
-	docker run --rm $(common_rust_args) -w /workspace/examples/quilkin-filter-example \
-		--entrypoint=cargo $(BUILD_IMAGE_TAG) clippy --tests -- -D warnings
-	docker run --rm $(common_rust_args) -w /workspace/examples/quilkin-filter-example \
-		--entrypoint=cargo $(BUILD_IMAGE_TAG) fmt -- --check
-
 # Run tests against documentation
 test-docs: ensure-build-image
 test-docs: GITHUB_REF_NAME ?= main
5 changes: 0 additions & 5 deletions cloudbuild.yaml
@@ -38,11 +38,6 @@ steps:
   id: test-quilkin
   waitFor:
   - fetch-quilkin
-- name: us-docker.pkg.dev/$PROJECT_ID/ci/make-docker
-  dir: ./build
-  args:
-  - test-examples
-  id: test-examples
 - name: us-docker.pkg.dev/$PROJECT_ID/ci/make-docker
   dir: ./build
   args:
5 changes: 3 additions & 2 deletions crates/test/tests/mesh.rs
@@ -189,8 +189,9 @@ trace_test!(datacenter_discovery, {
     #[track_caller]
     fn assert_config(config: &quilkin::Config, datacenter: &quilkin::config::Datacenter) {
         let dcs = config.datacenters().read();
-        let ipv4_dc = dcs.get(&std::net::Ipv4Addr::LOCALHOST.into());
-        let ipv6_dc = dcs.get(&std::net::Ipv6Addr::LOCALHOST.into());
+        let pin = dcs.pin();
+        let ipv4_dc = pin.get(&std::net::Ipv4Addr::LOCALHOST.into());
+        let ipv6_dc = pin.get(&std::net::Ipv6Addr::LOCALHOST.into());
 
         match (ipv4_dc, ipv6_dc) {
             (Some(dc), None) => assert_eq!(&*dc, datacenter),
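One detail worth noting in this hunk: the guard from `dcs.pin()` is hoisted into a local because the references returned by `get` borrow from the guard, so a temporary `dcs.pin().get(...)` would be dropped at the end of the statement. A sketch of the shape, with illustrative types:

```rust
// Sketch of why the pin is bound to a named local (illustrative types,
// assuming a papaya-style map as in the hunks above).
fn lookup(map: &papaya::HashMap<u32, String>) {
    // `get` returns a reference tied to the guard's lifetime, so the
    // guard must outlive both borrowed values.
    let pin = map.pin();
    let a = pin.get(&1);
    let b = pin.get(&2);
    println!("{a:?} {b:?}");
}
```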
1 change: 1 addition & 0 deletions deny.toml
@@ -57,6 +57,7 @@ version = 2
 allow = ["Apache-2.0", "MIT", "ISC", "BSD-3-Clause"]
 exceptions = [
     { crate = "adler32", allow = ["Zlib"] },
+    { crate = "atomic-wait", allow = ["BSD-2-Clause"] },
     # This license should not really be used for code, but here we are
     { crate = "notify", allow = ["CC0-1.0"] },
     { crate = "ring", allow = ["OpenSSL"] },
3 changes: 1 addition & 2 deletions docs/src/SUMMARY.md
@@ -26,7 +26,6 @@
   - [Pass](./services/proxy/filters/pass.md)
   - [Timestamp](./services/proxy/filters/timestamp.md)
   - [Token Router](./services/proxy/filters/token_router.md)
-  - [Writing Custom Filters](./services/proxy/filters/writing_custom_filters.md)
   - [Control Message Protocol](./services/proxy/qcmp.md)
   - [Metrics](./services/proxy/metrics.md)
 
@@ -55,4 +54,4 @@
 
 # Third Party
 
-- [Videos and Presentations](./third-party/presentations.md)
\ No newline at end of file
+- [Videos and Presentations](./third-party/presentations.md)
3 changes: 1 addition & 2 deletions docs/src/services/proxy/filters.md
@@ -14,8 +14,7 @@ As an example, say we would like to perform the following steps in our processing
 * Do not forward (drop) the packet if its compressed length is over 512 bytes.
 
 We would create a filter corresponding to each step either by leveraging any [existing filters](#built-in-filters)
-that do what we want or [writing one ourselves](./filters/writing_custom_filters.md) and connect them to form the
-following filter chain:
+that do what we want and connect them to form the following filter chain:
 
 ```bash
 append | compress | drop