From 5b37ebd33522cd1772fa65eeaa18d98f2e08596f Mon Sep 17 00:00:00 2001
From: Frank Elsinga
Date: Fri, 1 Nov 2024 21:48:22 +0100
Subject: [PATCH] added an integration test for the location details handler

---
 server/.gitignore                |  1 +
 server/src/locations/details.rs  | 96 ++++++++++++++++++++++++++++++++
 server/src/setup/database/mod.rs |  2 +
 3 files changed, 99 insertions(+)

diff --git a/server/.gitignore b/server/.gitignore
index eb5a316cb..d069c540c 100644
--- a/server/.gitignore
+++ b/server/.gitignore
@@ -1 +1,2 @@
 target
+src/locations/location_get_handler
\ No newline at end of file
diff --git a/server/src/locations/details.rs b/server/src/locations/details.rs
index e21399a86..848ebf151 100644
--- a/server/src/locations/details.rs
+++ b/server/src/locations/details.rs
@@ -101,3 +101,99 @@ fn extract_redirect_exact_match(type_: &str, key: &str) -> String {
         _ => format!("/view/{key}"), // can be triggered if we add a type but don't add it here
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+
+    use tokio::task::LocalSet;
+    use tracing::{error, info};
+
+    use super::*;
+    use crate::{setup::tests::PostgresTestContainer, AppData};
+
+    /// Allows testing if a modification has changed the output of the details API
+    ///
+    /// The testcase can be executed via running the following command on main
+    /// ```bash
+    /// INSTA_OUTPUT=none INSTA_UPDATE=always DATABASE_URL=postgres://postgres:CHANGE_ME@localhost:5432 cargo test -p navigatum-server test_get_handler_unchanged -- --nocapture --include-ignored
+    /// ```
+    ///
+    /// And then running this command on the change
+    /// ```bash
+    /// DATABASE_URL=postgres://postgres:CHANGE_ME@localhost:5432 cargo insta test --review -- -p navigatum-server test_get_handler_unchanged --nocapture --include-ignored
+    /// ```
+    ///
+    /// This is a bit slow, due to using a [`tokio::task::LocalSet`].
+    /// Using multiple cores for this might be possible, but optimising this testcase down from ~10 minutes is currently not worth it
+    #[ignore]
+    #[actix_web::test]
+    #[tracing_test::traced_test]
+    async fn test_get_handler_unchanged() {
+        // setup + load data into postgis
+        let pg = PostgresTestContainer::new().await;
+        for i in 0..20 {
+            let res = crate::setup::database::load_data(&pg.pool).await;
+            if let Err(e) = res {
+                error!("failed to load db because {e:?}. Retrying for 20s");
Retrying for 20s"); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } else { + info!("successfully initalised the db in try {i}"); + break; + } + } + + let keys: Vec = sqlx::query_scalar!("SELECT key FROM de") + .fetch_all(&pg.pool) + .await + .unwrap(); + let all_keys_len = keys.len(); + let mut resolved_cnt = 0_usize; + + for key_chunk in keys.chunks(1000) { + let tasks = LocalSet::new(); + for key in key_chunk { + let inner_key = key.clone(); + let inner_pool = pg.pool.clone(); + tasks.spawn_local(async move { + check_snapshot(inner_key, inner_pool).await; + }); + } + tasks.await; + resolved_cnt += key_chunk.len(); + info!( + "processed {resolved_cnt}/{all_keys_len} <=> {percentage}%", + percentage = 100_f32 * (resolved_cnt as f32) / (all_keys_len as f32) + ) + } + } + + async fn check_snapshot(key: String, pool: PgPool) { + let data = AppData { + pool, + meilisearch_initialised: Arc::new(Default::default()), + }; + let app = actix_web::App::new() + .app_data(web::Data::new(data)) + .service(get_handler); + let app = actix_web::test::init_service(app).await; + let req = actix_web::test::TestRequest::get() + .uri(&format!("/{key}")) + .to_request(); + let (_, resp) = actix_web::test::call_service(&app, req).await.into_parts(); + + assert_eq!(resp.status().as_u16(), 200); + + let body_box = resp.into_body(); + let body_bytes = actix_web::body::to_bytes(body_box).await.unwrap(); + let body_str = String::from_utf8(body_bytes.into_iter().collect()).unwrap(); + let body_value: serde_json::Value = serde_json::from_str(&body_str).unwrap(); + + let mut settings = insta::Settings::clone_current(); + settings.set_sort_maps(true); + settings.set_snapshot_path("location_get_handler"); + settings.bind(|| { + insta::assert_json_snapshot!(key.clone(), body_value); + }); + } +} diff --git a/server/src/setup/database/mod.rs b/server/src/setup/database/mod.rs index 6212026f2..15ef2b3a2 100644 --- a/server/src/setup/database/mod.rs +++ b/server/src/setup/database/mod.rs @@ -14,7 +14,9 @@ pub async fn setup(pool: &sqlx::PgPool) -> anyhow::Result<()> { } #[tracing::instrument(skip(pool))] pub async fn load_data(pool: &sqlx::PgPool) -> anyhow::Result<()> { + debug!("starting to download the status"); let (new_keys, new_hashes) = data::download_status().await?; + debug!("loaded new keys/hashes successfully"); { let _ = info_span!("deleting old data").enter(); let mut tx = pool.begin().await?;