diff --git a/Cargo.lock b/Cargo.lock
index 387706a3fbcb6..6f18a4607ad36 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2724,9 +2724,9 @@ dependencies = [
[[package]]
name = "const-str"
-version = "0.5.6"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6"
+checksum = "671927c085eb5827d30b95df08f6c6a2301eafe2274c368bb2c16f42e03547eb"
[[package]]
name = "constant_time_eq"
diff --git a/README.md b/README.md
index 96bc0f35c7ddf..8717e7725e920 100644
--- a/README.md
+++ b/README.md
@@ -9,25 +9,18 @@
-### 🌊 Reimagine real-time data engineering.
+### 🌊 Ride the Wave of Real-Time Data.
-
- 📚
- Documentation 🚀
-
- Slack Community
-
+
+ Docs | Benchmarks | Demos
+
+
+
-RisingWave is a Postgres-compatible SQL database engineered to provide the simplest and most cost-efficient approach for processing, analyzing, and managing real-time event streaming data.
+RisingWave is the world's most advanced streaming database engineered to provide the simplest and most cost-efficient approach for processing, analyzing, and managing real-time event streaming data. It provides both a Postgres-compatible [SQL interface](https://docs.risingwave.com/sql/overview) and a DataFrame-style [Python interface](https://docs.risingwave.com/python-sdk/intro).
-RisingWave can ingest millions of events per second, continuously join and analyze live data streams with historical tables, serve ad-hoc queries in real-time, and deliver fresh, consistent results wherever needed.
+RisingWave can ingest millions of events per second, continuously join and analyze live data streams with historical tables, serve ad-hoc queries at low latency, and deliver fresh, consistent results wherever needed.
-![RisingWave](./docs/dev/src/images/architecture_20240908.png)
+![RisingWave](./docs/dev/src/images/architecture_20250127.png)
## Try it out in 60 seconds
diff --git a/docs/dev/src/images/architecture_20250127.png b/docs/dev/src/images/architecture_20250127.png
new file mode 100644
index 0000000000000..703a38e83f356
Binary files /dev/null and b/docs/dev/src/images/architecture_20250127.png differ
diff --git a/e2e_test/batch/join/asof_join.slt b/e2e_test/batch/join/asof_join.slt
new file mode 100644
index 0000000000000..bf905b661e107
--- /dev/null
+++ b/e2e_test/batch/join/asof_join.slt
@@ -0,0 +1,43 @@
+statement ok
+SET RW_IMPLICIT_FLUSH TO true;
+
+statement ok
+create table t1 (v1 int, v2 int, v3 int primary key);
+
+statement ok
+create table t2 (v1 int, v2 int, v3 int primary key);
+
+statement ok
+insert into t1 values (1, 2, 3), (2, 3, 4), (1, 2, 9);
+
+statement ok
+insert into t2 values (1, NULL, 8), (1, 3, 4), (1, 2, 5), (1, 2, 6);
+
+# asof inner join
+query IIIIII
+SELECT t1.v1 t1_v1, t1.v2 t1_v2, t1.v3 t1_v3, t2.v1 t2_v1, t2.v2 t2_v2, t2.v3 t2_v3 FROM t1 ASOF JOIN t2 ON t1.v1 = t2.v1 and t1.v2 < t2.v2 order by t1.v1, t1.v3;
+----
+1 2 3 1 3 4
+1 2 9 1 3 4
+
+# asof left join
+query IIIIII
+SELECT t1.v1 t1_v1, t1.v2 t1_v2, t1.v3 t1_v3, t2.v1 t2_v1, t2.v2 t2_v2, t2.v3 t2_v3 FROM t1 ASOF LEFT JOIN t2 ON t1.v1 = t2.v1 and t1.v2 < t2.v2 order by t1.v1, t1.v3;
+----
+1 2 3 1 3 4
+1 2 9 1 3 4
+2 3 4 NULL NULL NULL
+
+# asof left join with reversed inequality (no right rows qualify, so all left rows pad with NULLs)
+query IIIIII
+SELECT t1.v1 t1_v1, t1.v2 t1_v2, t1.v3 t1_v3, t2.v1 t2_v1, t2.v2 t2_v2, t2.v3 t2_v3 FROM t1 ASOF LEFT JOIN t2 ON t1.v1 = t2.v1 and t1.v2 > t2.v2 order by t1.v1, t1.v3;
+----
+1 2 3 NULL NULL NULL
+1 2 9 NULL NULL NULL
+2 3 4 NULL NULL NULL
+
+statement ok
+drop table t1;
+
+statement ok
+drop table t2;
diff --git a/proto/batch_plan.proto b/proto/batch_plan.proto
index 7ffdf94e3c30a..6e07ceae4d5d4 100644
--- a/proto/batch_plan.proto
+++ b/proto/batch_plan.proto
@@ -289,6 +289,7 @@ message HashJoinNode {
// Null safe means it treats `null = null` as true.
// Each key pair can be null safe independently. (left_key, right_key, null_safe)
repeated bool null_safe = 6;
+ optional plan_common.AsOfJoinDesc asof_desc = 7;
}
message SortMergeJoinNode {
diff --git a/src/batch/executors/benches/hash_join.rs b/src/batch/executors/benches/hash_join.rs
index 330fc299594d0..6d64461dd1c2d 100644
--- a/src/batch/executors/benches/hash_join.rs
+++ b/src/batch/executors/benches/hash_join.rs
@@ -76,6 +76,7 @@ fn create_hash_join_executor(
"HashJoinExecutor".into(),
CHUNK_SIZE,
None,
+ None,
BatchSpillMetrics::for_test(),
ShutdownToken::empty(),
MemoryContext::none(),
diff --git a/src/batch/executors/src/executor/join/hash_join.rs b/src/batch/executors/src/executor/join/hash_join.rs
index af89b3d1503a9..44518e5155496 100644
--- a/src/batch/executors/src/executor/join/hash_join.rs
+++ b/src/batch/executors/src/executor/join/hash_join.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use std::cmp::Ordering;
use std::iter;
use std::iter::empty;
use std::marker::PhantomData;
@@ -25,8 +26,8 @@ use risingwave_common::bitmap::{Bitmap, BitmapBuilder, FilterByBitmap};
use risingwave_common::catalog::Schema;
use risingwave_common::hash::{HashKey, HashKeyDispatcher, PrecomputedBuildHasher};
use risingwave_common::memory::{MemoryContext, MonitoredGlobalAlloc};
-use risingwave_common::row::{repeat_n, RowExt};
-use risingwave_common::types::{DataType, Datum};
+use risingwave_common::row::{repeat_n, Row, RowExt};
+use risingwave_common::types::{DataType, Datum, DefaultOrd};
use risingwave_common::util::chunk_coalesce::DataChunkBuilder;
use risingwave_common::util::iter_util::ZipEqFast;
use risingwave_common_estimate_size::EstimateSize;
@@ -35,7 +36,7 @@ use risingwave_pb::batch_plan::plan_node::NodeBody;
use risingwave_pb::data::DataChunk as PbDataChunk;
use risingwave_pb::Message;
-use super::{ChunkedData, JoinType, RowId};
+use super::{AsOfDesc, AsOfInequalityType, ChunkedData, JoinType, RowId};
use crate::error::{BatchError, Result};
use crate::executor::{
BoxedDataChunkStream, BoxedExecutor, BoxedExecutorBuilder, Executor, ExecutorBuilder,
@@ -83,6 +84,8 @@ pub struct HashJoinExecutor {
null_matched: Vec,
identity: String,
chunk_size: usize,
+ /// As-of join descriptor; `Some` iff this hash join is an as-of join
+ asof_desc: Option,
spill_backend: Option,
spill_metrics: Arc,
@@ -179,6 +182,7 @@ pub struct EquiJoinParams {
next_build_row_with_same_key: ChunkedData