diff --git a/README.md b/README.md
index 4f801e99b..5f0407a40 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,17 @@
![Conductor](docs/img/logo.svg)
-[![Github release](https://img.shields.io/github/v/release/Netflix/conductor.svg)](https://GitHub.com/Netflix/conductor/releases)
+[![Github release](https://img.shields.io/github/v/release/conductor-oss/conductor.svg)](https://GitHub.com/conductor-oss/conductor/releases)
[![License](https://img.shields.io/github/license/conductor-oss/conductor.svg)](http://www.apache.org/licenses/LICENSE-2.0)
Conductor is a platform _originally_ created at Netflix to orchestrate microservices and events.
Conductor OSS is maintained by the team of developers at [Orkes](https://orkes.io/) along with the members of the open source community.
+The latest version is [![Github release](https://img.shields.io/github/v/release/conductor-oss/conductor.svg)](https://GitHub.com/conductor-oss/conductor/releases)
## Conductor OSS
This is the new home for the Conductor open source project going forward (previously hosted at Netflix/Conductor).
-> [!IMPORTANT]
-> Going forward, all the bug fixes, feature requests and security patches will be applied and released from this repository.
-
-The last published version of Netflix Conductor will be **3.15.0** which we will continue to support.
+_The last published version of Netflix Conductor will be **3.15.0**, which we will continue to support._
If you would like to participate in the roadmap and development, [please reach out](https://forms.gle/P2i1xHrxPQLrjzTB7).
@@ -22,71 +20,57 @@ Show support for the Conductor OSS. Please help spread the awareness by starrin
[![GitHub stars](https://img.shields.io/github/stars/conductor-oss/conductor.svg?style=social&label=Star&maxAge=)](https://GitHub.com/conductor-oss/conductor/)
-## Update your local forks/clones
-Please update your forks to point to this repo. This will ensure your commits and PRs can be send against this repository
-```shell
-git remote set-url origin https://github.com/conductor-oss/conductor
-```
-> [!IMPORTANT]
-> **Follow the steps below if you have an active PR against the Netflix/Conductor repository**
-> 1. Fork **this** repository
-> 2. Update your local repository to change the remote to this repository
-> 3. Send a PR against the `main` branch
+## Getting Started
-## Releases
-The latest version is [![Github release](https://img.shields.io/github/v/release/conductor-oss/conductor.svg)](https://GitHub.com/conductor-oss/conductor/releases)
+### Using Docker (Recommended)
+Follow the steps below to launch the Docker container:
-The next scheduled release is in Feb 2024.
+```shell
+docker compose -f docker/docker-compose.yaml up
+```
+* Once the container starts, navigate to http://localhost:5000 to launch the UI.
+* APIs are accessible at http://localhost:8080
+* Swagger docs: http://localhost:8080/swagger-ui/index.html?configUrl=/api-docs/swagger-config#/
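+
+To verify the server is up, you can query one of the metadata endpoints (a quick sanity check; any REST client works):
+
+```shell
+curl http://localhost:8080/api/metadata/taskdefs
+```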
-## Resources
-#### [Slack Community](https://join.slack.com/t/orkes-conductor/shared_invite/zt-xyxqyseb-YZ3hwwAgHJH97bsrYRnSZg)
-We have an active [community](https://join.slack.com/t/orkes-conductor/shared_invite/zt-xyxqyseb-YZ3hwwAgHJH97bsrYRnSZg) of Conductor users and contributors on the channel.
-#### [Documentation Site](https://docs.conductor-oss.org/)
-[Documentation](https://docs.conductor-oss.org/) and tutorial on how to use Conductor
+## Database Requirements
-[Discussion Forum](https://github.com/conductor-oss/conductor/discussions): Please use the forum for questions and discussing ideas and join the community.
+* The default persistence store is Redis
+* The indexing backend is [Elasticsearch](https://www.elastic.co/) (7.x)
-### Conductor SDKs
-Conductor supports creating workflows using JSON and Code.
-SDK support for creating workflows using code is available in multiple languages and can be found at https://github.com/conductor-sdk
+## Configuration for Various Database Backends
+| Backend | Configuration |
+|----------------|---------------------------------------------------------------------------------------|
+| Redis + ES7 | [config-redis.properties](docker/server/config/config-redis.properties) |
+| Postgres | [config-postgres.properties](docker/server/config/config-postgres.properties) |
+| Postgres + ES7 | [config-postgres-es7.properties](docker/server/config/config-postgres-es7.properties) |
+| MySQL + ES7 | [config-mysql.properties](docker/server/config/config-mysql.properties) |
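+
+For example, to bring the stack up on Postgres, you can point compose at the matching file (the exact compose file names under docker/ may vary by release):
+
+```shell
+docker compose -f docker/docker-compose-postgres.yaml up
+```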
-## Getting Started - Building & Running Conductor
+## Other Requirements
+* JDK 17+
+* The UI requires Node 14 to build. Earlier Node versions may work but are untested.
-### From Source:
+### Building From Source
If you wish to build your own distribution, you can run `./gradlew build` from this project, which produces the runtime artifacts.
The runnable server is in the server/ module.
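+
+A minimal build-and-run sketch (the exact jar name depends on the version you build; the Spring Boot config flag is optional and is shown pointing at one of the property files above):
+
+```shell
+./gradlew build
+# Run the Spring Boot jar produced by the server module
+java -jar server/build/libs/conductor-server-*-boot.jar \
+  --spring.config.additional-location=docker/server/config/config-postgres.properties
+```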
-### Using Docker (Recommended)
-Follow the steps below to launch the docker container:
-
-```shell
-
-# Create volumes for persistent stores
-# Used to create a persistent volume that will preserve the
-docker volume create postgres
-docker volume create redis
+## Conductor OSS Roadmap
+[See the Conductor OSS roadmap](ROADMAP.md)
-docker run --init -p 8080:8080 -p 1234:5000 --mount source=redis,target=/redis \
---mount source=postgres,target=/pgdata conductoross/conductor-standalone:3.15.0
-```
-
-Navigate to http://localhost:1234 once the container starts to launch UI.
+## Resources
+#### [Slack Community](https://join.slack.com/t/orkes-conductor/shared_invite/zt-2hmxn0i3n-_W~a9rWMbvMoYmlJo3Y15g)
+We have an active [community](https://join.slack.com/t/orkes-conductor/shared_invite/zt-2hmxn0i3n-_W~a9rWMbvMoYmlJo3Y15g) of Conductor users and contributors on the channel.
+#### [Documentation Site](https://docs.conductor-oss.org/)
+[Documentation](https://docs.conductor-oss.org/) and tutorial on how to use Conductor
-## Docker Containers for production usage
-```shell
-docker pull conductoross/conductor:3.15.0
-```
+[Discussion Forum](https://github.com/conductor-oss/conductor/discussions): Please use the forum for questions and discussing ideas and join the community.
+### Conductor SDKs
+Conductor supports creating workflows using JSON and code.
+SDK support for creating workflows using code is available in multiple languages and can be found at https://github.com/conductor-sdk
-## Database Requirements
-* The default persistence used is Redis
-* The indexing backend is [Elasticsearch](https://www.elastic.co/) (6.x)
-## Other Requirements
-* JDK 17+
-* UI requires Node 14 to build. Earlier Node versions may work but are untested.
## Get Support
There are several ways to get in touch with us:
diff --git a/ROADMAP.md b/ROADMAP.md
new file mode 100644
index 000000000..35d01abe2
--- /dev/null
+++ b/ROADMAP.md
@@ -0,0 +1,75 @@
+# Conductor OSS Roadmap
+
+
+## New Features
+### Type safety for workflow inputs and task input/output through JSON Schema
+
+* Allow type-safe workflows and workers with support for JSON Schema and protobuf
+* Enable scaffolding code generation for workers from their schemas using the CLI tool
+
+### New System Tasks
+
+* Database task to work with relational and NoSQL databases
+* Polling support for the HTTP task
+* Operators
+  * For..Each with parallel and sequential execution
+  * Improved While loop
+  * Try..Catch for improved error handling at the task level
+
+### LLM Integrations
+Conductor is a perfect platform on which to build your next LLM-powered application or to incorporate genAI into your existing applications.
+Enable system tasks for LLM integrations that let you work with various language models for:
+1. Text completion
+2. Chat completion with memory
+3. Embedding generation
+
+### CLI for Conductor
+Allow developers to manage their Conductor instance via a CLI.
+
+* Manage metadata
+* Query and manage workflow executions (terminate, pause, resume, retry)
+* Start, stop, and manage the Conductor server
+
+### Support Python as a scripting language for INLINE task
+Extend the usability of Conductor by allowing lightweight Python code in INLINE tasks.
+
+### New APIs for workflow state management
+
+* Synchronous execution of workflows
+* Update workflow variables
+* Update tasks synchronously
+
+## SDKs
+
+* Rust
+* Kotlin
+* C++
+* Ruby
+* Swift
+* Flutter / Dart
+* PHP
+
+### Worker metrics on server
+Expose an endpoint on the server that workers can use to publish worker-specific metrics.
+This will allow monitoring metrics for all the workers across the entire distributed system.
+
+## Testing
+Infrastructure to make workflows easier to test and debug right from the UI and IDE.
+
+### Workflow Debugger
+
+* Ability to debug your workflows during development, just like you would debug code
+* All the functionality of a debugger:
+  * Add/remove breakpoints
+  * Step to the next task
+  * Drop to a certain task that was already executed (going back in time)
+  * Inspect, modify, and add input/output parameters
+  * Watch windows to see values of interesting parameters during execution
+  * Attach to a specific workflow execution
+  * Remote task debugging (with SDK support): enable step-by-step execution in a task worker from the server
+
+## Maintenance
+
+1. Deprecate support for Elasticsearch 6
+2. Update support for newer versions of Elasticsearch
+3. Improve/Fix JOIN task performance (less about making it performant and more about just fixing the usability) - Done
\ No newline at end of file
diff --git a/amqp/build.gradle b/amqp/build.gradle
index 3d41fb3e5..ffa9ea735 100644
--- a/amqp/build.gradle
+++ b/amqp/build.gradle
@@ -9,7 +9,4 @@ dependencies {
compileOnly 'org.springframework.boot:spring-boot-starter'
compileOnly 'org.springframework.boot:spring-boot-starter-web'
-
-
-
}
\ No newline at end of file
diff --git a/build.gradle b/build.gradle
index 0199c295b..4892423e4 100644
--- a/build.gradle
+++ b/build.gradle
@@ -20,7 +20,7 @@ plugins {
id 'maven-publish'
id 'signing'
id 'java-library'
- id "com.diffplug.spotless" version "5.0.0"
+ id "com.diffplug.spotless" version "6.25.0"
id 'jacoco'
id 'org.sonarqube' version '3.4.0.2513'
}
@@ -355,6 +355,8 @@ allprojects {
strictly '[4.1.108.Final]'
}
}
+ compileOnly 'org.projectlombok:lombok:1.18.34'
+ annotationProcessor 'org.projectlombok:lombok:1.18.34'
annotationProcessor 'org.springframework.boot:spring-boot-configuration-processor'
testImplementation('org.springframework.boot:spring-boot-starter-test') {
@@ -363,6 +365,7 @@ allprojects {
testImplementation('org.springframework.boot:spring-boot-starter-log4j2')
testImplementation 'junit:junit'
testImplementation "org.junit.vintage:junit-vintage-engine"
+ testAnnotationProcessor 'org.projectlombok:lombok:1.18.34'
}
// processes additional configuration metadata json file as described here
diff --git a/cassandra-persistence/src/test/groovy/com/netflix/conductor/cassandra/dao/CassandraExecutionDAOSpec.groovy b/cassandra-persistence/src/test/groovy/com/netflix/conductor/cassandra/dao/CassandraExecutionDAOSpec.groovy
index 13e79e238..e438f4a88 100644
--- a/cassandra-persistence/src/test/groovy/com/netflix/conductor/cassandra/dao/CassandraExecutionDAOSpec.groovy
+++ b/cassandra-persistence/src/test/groovy/com/netflix/conductor/cassandra/dao/CassandraExecutionDAOSpec.groovy
@@ -403,44 +403,6 @@ class CassandraExecutionDAOSpec extends CassandraSpec {
eventExecutionList != null && eventExecutionList.empty
}
- def "verify workflow serialization"() {
- given: 'define a workflow'
- String workflowId = new IDGenerator().generate()
- WorkflowTask workflowTask = new WorkflowTask(taskDefinition: new TaskDef(concurrentExecLimit: 2))
- WorkflowDef workflowDef = new WorkflowDef(name: UUID.randomUUID().toString(), version: 1, tasks: [workflowTask])
- WorkflowModel workflow = new WorkflowModel(workflowDefinition: workflowDef, workflowId: workflowId, status: WorkflowModel.Status.RUNNING, createTime: System.currentTimeMillis())
-
- when: 'serialize workflow'
- def workflowJson = objectMapper.writeValueAsString(workflow)
-
- then:
- !workflowJson.contains('failedReferenceTaskNames')
- // workflowTask
- !workflowJson.contains('decisionCases')
- !workflowJson.contains('defaultCase')
- !workflowJson.contains('forkTasks')
- !workflowJson.contains('joinOn')
- !workflowJson.contains('defaultExclusiveJoinTask')
- !workflowJson.contains('loopOver')
- }
-
- def "verify task serialization"() {
- given: 'define a workflow and tasks for this workflow'
- String workflowId = new IDGenerator().generate()
- WorkflowTask workflowTask = new WorkflowTask(taskDefinition: new TaskDef(concurrentExecLimit: 2))
- TaskModel task = new TaskModel(workflowInstanceId: workflowId, taskType: UUID.randomUUID().toString(), referenceTaskName: UUID.randomUUID().toString(), status: TaskModel.Status.SCHEDULED, taskId: new IDGenerator().generate(), workflowTask: workflowTask)
-
- when: 'serialize task'
- def taskJson = objectMapper.writeValueAsString(task)
-
- then:
- !taskJson.contains('decisionCases')
- !taskJson.contains('defaultCase')
- !taskJson.contains('forkTasks')
- !taskJson.contains('joinOn')
- !taskJson.contains('defaultExclusiveJoinTask')
- }
-
def "serde of workflow with large number of tasks"() {
given: 'create a workflow and tasks for this workflow'
String workflowId = new IDGenerator().generate()
diff --git a/client-spring/src/main/resources/META-INF/spring/mybatis-spring-boot-autoconfigure/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports b/client-spring/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports
similarity index 100%
rename from client-spring/src/main/resources/META-INF/spring/mybatis-spring-boot-autoconfigure/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports
rename to client-spring/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports
diff --git a/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoEnum.java b/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoEnum.java
new file mode 100644
index 000000000..c07e679f7
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoEnum.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2022 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.annotations.protogen;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * ProtoEnum annotates an enum type that will be exposed via the GRPC API as a native Protocol
+ * Buffers enum.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface ProtoEnum {}
diff --git a/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoField.java b/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoField.java
new file mode 100644
index 000000000..a61bb5ea1
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoField.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2022 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.annotations.protogen;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * ProtoField annotates a field inside a struct with metadata on how to expose it on its
+ * corresponding Protocol Buffers struct. For a field to be exposed in a ProtoBuf struct, the
+ * containing struct must also be annotated with a {@link ProtoMessage} or {@link ProtoEnum} tag.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.FIELD)
+public @interface ProtoField {
+ /**
+ * Mandatory. Sets the Protocol Buffer ID for this specific field. Once a field has been
+ * annotated with a given ID, the ID can never change to a different value or the resulting
+ * Protocol Buffer struct will not be backwards compatible.
+ *
+ * @return the numeric ID for the field
+ */
+ int id();
+}
diff --git a/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoMessage.java b/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoMessage.java
new file mode 100644
index 000000000..45fa884f9
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/annotations/protogen/ProtoMessage.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.annotations.protogen;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * ProtoMessage annotates a given Java class so it becomes exposed via the GRPC API as a native
+ * Protocol Buffers struct. The annotated class must be a POJO.
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface ProtoMessage {
+ /**
+ * Sets whether the generated mapping code will contain a helper to translate the POJO for this
+ * class into the equivalent ProtoBuf object.
+ *
+ * @return whether this class will generate a mapper to ProtoBuf objects
+ */
+ boolean toProto() default true;
+
+ /**
+ * Sets whether the generated mapping code will contain a helper to translate the ProtoBuf
+ * object for this class into the equivalent POJO.
+ *
+ * @return whether this class will generate a mapper from ProtoBuf objects
+ */
+ boolean fromProto() default true;
+
+ /**
+ * Sets whether this is a wrapper class that will be used to encapsulate complex nested type
+ * interfaces. Wrapper classes are not directly exposed by the ProtoBuf API and must be mapped
+ * manually.
+ *
+ * @return whether this is a wrapper class
+ */
+ boolean wrapper() default false;
+}
diff --git a/common/src/main/java/com/netflix/conductor/common/config/ObjectMapperProvider.java b/common/src/main/java/com/netflix/conductor/common/config/ObjectMapperProvider.java
index 51ebfc8cf..5e3a5562c 100644
--- a/common/src/main/java/com/netflix/conductor/common/config/ObjectMapperProvider.java
+++ b/common/src/main/java/com/netflix/conductor/common/config/ObjectMapperProvider.java
@@ -17,6 +17,8 @@
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import com.fasterxml.jackson.module.afterburner.AfterburnerModule;
/**
@@ -26,6 +28,8 @@
*/
public class ObjectMapperProvider {
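+    // Share a single pre-configured instance: ObjectMapper is expensive to construct
+    // and is thread-safe once configured, so every caller can reuse it.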
+ private static final ObjectMapper objectMapper = _getObjectMapper();
+
/**
* The customizations in this method are configured using {@link
* org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration}
@@ -39,6 +43,10 @@ public class ObjectMapperProvider {
* @see org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration
*/
public ObjectMapper getObjectMapper() {
+ return objectMapper;
+ }
+
+ private static ObjectMapper _getObjectMapper() {
final ObjectMapper objectMapper = new ObjectMapper();
objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
objectMapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false);
@@ -46,7 +54,9 @@ public ObjectMapper getObjectMapper() {
objectMapper.setDefaultPropertyInclusion(
JsonInclude.Value.construct(
JsonInclude.Include.NON_NULL, JsonInclude.Include.ALWAYS));
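+        // Tolerate beans with no serializable properties, and add java.time support
+        // (JavaTimeModule) alongside the existing modules below.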
+ objectMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
objectMapper.registerModule(new JsonProtoModule());
+ objectMapper.registerModule(new JavaTimeModule());
objectMapper.registerModule(new AfterburnerModule());
return objectMapper;
}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/Auditable.java b/common/src/main/java/com/netflix/conductor/common/metadata/Auditable.java
index fcdfdf9fa..bef2e1792 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/Auditable.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/Auditable.java
@@ -42,7 +42,7 @@ public void setOwnerApp(String ownerApp) {
* @return the createTime
*/
public Long getCreateTime() {
- return createTime;
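+        // Treat an unset timestamp as 0 (mirrored in getUpdateTime below) so callers
+        // don't hit a NullPointerException when unboxing the Long.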
+ return createTime == null ? 0 : createTime;
}
/**
@@ -56,7 +56,7 @@ public void setCreateTime(Long createTime) {
* @return the updateTime
*/
public Long getUpdateTime() {
- return updateTime;
+ return updateTime == null ? 0 : updateTime;
}
/**
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/BaseDef.java b/common/src/main/java/com/netflix/conductor/common/metadata/BaseDef.java
index 7fec07255..fac1d1047 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/BaseDef.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/BaseDef.java
@@ -22,6 +22,7 @@
* A base class for {@link com.netflix.conductor.common.metadata.workflow.WorkflowDef} and {@link
* com.netflix.conductor.common.metadata.tasks.TaskDef}.
*/
+@Deprecated
public abstract class BaseDef extends Auditable {
private final Map<Permission, String> accessPolicy = new EnumMap<>(Permission.class);
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/SchemaDef.java b/common/src/main/java/com/netflix/conductor/common/metadata/SchemaDef.java
new file mode 100644
index 000000000..5d8b80bbf
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/SchemaDef.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2024 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.common.metadata;
+
+import java.util.Map;
+
+import com.netflix.conductor.annotations.protogen.ProtoEnum;
+import com.netflix.conductor.annotations.protogen.ProtoField;
+import com.netflix.conductor.annotations.protogen.ProtoMessage;
+
+import jakarta.validation.constraints.NotNull;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.NoArgsConstructor;
+
+@EqualsAndHashCode(callSuper = true)
+@Builder
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+@ProtoMessage
+public class SchemaDef extends Auditable {
+
+ @ProtoEnum
+ public enum Type {
+ JSON,
+ AVRO,
+ PROTOBUF
+ }
+
+ @ProtoField(id = 1)
+ @NotNull
+ private String name;
+
+ @ProtoField(id = 2)
+ @NotNull
+ @Builder.Default
+ private int version = 1;
+
+ @ProtoField(id = 3)
+ @NotNull
+ private Type type;
+
+ // Schema definition stored here
+    private Map<String, Object> data;
+
+ // Externalized schema definition (eg. via AVRO, Protobuf registry)
+ // If using Orkes Schema registry, this points to the name of the schema in the registry
+ private String externalRef;
+}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/acl/Permission.java b/common/src/main/java/com/netflix/conductor/common/metadata/acl/Permission.java
index dfcc77571..a87c89953 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/acl/Permission.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/acl/Permission.java
@@ -15,6 +15,7 @@
import com.netflix.conductor.annotations.protogen.ProtoEnum;
@ProtoEnum
+@Deprecated
public enum Permission {
OWNER,
OPERATOR
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java b/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java
index 0321c85b7..56817315a 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/events/EventHandler.java
@@ -146,7 +146,9 @@ public static class Action {
public enum Type {
start_workflow,
complete_task,
- fail_task
+ fail_task,
+ terminate_workflow,
+ update_workflow_variables
}
@ProtoField(id = 1)
@@ -164,6 +166,12 @@ public enum Type {
@ProtoField(id = 5)
private boolean expandInlineJSON;
+ @ProtoField(id = 6)
+ private TerminateWorkflow terminate_workflow;
+
+ @ProtoField(id = 7)
+ private UpdateWorkflowVariables update_workflow_variables;
+
/**
* @return the action
*/
@@ -234,6 +242,35 @@ public void setExpandInlineJSON(boolean expandInlineJSON) {
public boolean isExpandInlineJSON() {
return expandInlineJSON;
}
+
+ /**
+ * @return the terminate_workflow
+ */
+ public TerminateWorkflow getTerminate_workflow() {
+ return terminate_workflow;
+ }
+
+ /**
+ * @param terminate_workflow the terminate_workflow to set
+ */
+ public void setTerminate_workflow(TerminateWorkflow terminate_workflow) {
+ this.terminate_workflow = terminate_workflow;
+ }
+
+ /**
+ * @return the update_workflow_variables
+ */
+ public UpdateWorkflowVariables getUpdate_workflow_variables() {
+ return update_workflow_variables;
+ }
+
+ /**
+ * @param update_workflow_variables the update_workflow_variables to set
+ */
+ public void setUpdate_workflow_variables(
+ UpdateWorkflowVariables update_workflow_variables) {
+ this.update_workflow_variables = update_workflow_variables;
+ }
}
@ProtoMessage
@@ -414,4 +451,97 @@ public void setTaskToDomain(Map taskToDomain) {
this.taskToDomain = taskToDomain;
}
}
+
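+    /** Payload for the terminate_workflow action: the target workflow and the reason. */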
+ @ProtoMessage
+ public static class TerminateWorkflow {
+
+ @ProtoField(id = 1)
+ private String workflowId;
+
+ @ProtoField(id = 2)
+ private String terminationReason;
+
+ /**
+ * @return the workflowId
+ */
+ public String getWorkflowId() {
+ return workflowId;
+ }
+
+ /**
+ * @param workflowId the workflowId to set
+ */
+ public void setWorkflowId(String workflowId) {
+ this.workflowId = workflowId;
+ }
+
+ /**
+ * @return the reasonForTermination
+ */
+ public String getTerminationReason() {
+ return terminationReason;
+ }
+
+ /**
+ * @param terminationReason the reasonForTermination to set
+ */
+ public void setTerminationReason(String terminationReason) {
+ this.terminationReason = terminationReason;
+ }
+ }
+
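+    /** Payload for the update_workflow_variables action: the target workflow and the variables to set. */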
+ @ProtoMessage
+ public static class UpdateWorkflowVariables {
+
+ @ProtoField(id = 1)
+ private String workflowId;
+
+ @ProtoField(id = 2)
+        private Map<String, Object> variables;
+
+ @ProtoField(id = 3)
+ private Boolean appendArray;
+
+ /**
+ * @return the workflowId
+ */
+ public String getWorkflowId() {
+ return workflowId;
+ }
+
+ /**
+ * @param workflowId the workflowId to set
+ */
+ public void setWorkflowId(String workflowId) {
+ this.workflowId = workflowId;
+ }
+
+ /**
+ * @return the variables
+ */
+        public Map<String, Object> getVariables() {
+ return variables;
+ }
+
+ /**
+ * @param variables the variables to set
+ */
+        public void setVariables(Map<String, Object> variables) {
+ this.variables = variables;
+ }
+
+ /**
+ * @return appendArray
+ */
+ public Boolean isAppendArray() {
+ return appendArray;
+ }
+
+ /**
+ * @param appendArray the appendArray to set
+ */
+ public void setAppendArray(Boolean appendArray) {
+ this.appendArray = appendArray;
+ }
+ }
}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java
index ea98133f3..495ff06a9 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/Task.java
@@ -24,7 +24,6 @@
import com.netflix.conductor.annotations.protogen.ProtoMessage;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
-import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.protobuf.Any;
import io.swagger.v3.oas.annotations.Hidden;
@@ -203,6 +202,9 @@ public boolean isRetriable() {
@ProtoField(id = 42)
private boolean subworkflowChanged;
+ // If the task is an event associated with a parent task, the id of the parent task
+ private String parentTaskId;
+
public Task() {}
/**
@@ -630,7 +632,6 @@ public void setOutputMessage(Any outputMessage) {
/**
* @return {@link Optional} containing the task definition if available
*/
- @JsonIgnore
public Optional<TaskDef> getTaskDefinition() {
return Optional.ofNullable(this.getWorkflowTask()).map(WorkflowTask::getTaskDefinition);
}
@@ -756,6 +757,14 @@ public void setSubWorkflowId(String subWorkflowId) {
}
}
+ public String getParentTaskId() {
+ return parentTaskId;
+ }
+
+ public void setParentTaskId(String parentTaskId) {
+ this.parentTaskId = parentTaskId;
+ }
+
public Task copy() {
Task copy = new Task();
copy.setCallbackAfterSeconds(callbackAfterSeconds);
@@ -788,7 +797,7 @@ public Task copy() {
copy.setIsolationGroupId(isolationGroupId);
copy.setSubWorkflowId(getSubWorkflowId());
copy.setSubworkflowChanged(subworkflowChanged);
-
+ copy.setParentTaskId(parentTaskId);
return copy;
}
@@ -809,7 +818,7 @@ public Task deepCopy() {
deepCopy.setWorkerId(workerId);
deepCopy.setReasonForIncompletion(reasonForIncompletion);
deepCopy.setSeq(seq);
-
+ deepCopy.setParentTaskId(parentTaskId);
return deepCopy;
}
@@ -963,7 +972,8 @@ && getWorkflowPriority() == task.getWorkflowPriority()
getExternalOutputPayloadStoragePath(),
task.getExternalOutputPayloadStoragePath())
&& Objects.equals(getIsolationGroupId(), task.getIsolationGroupId())
- && Objects.equals(getExecutionNameSpace(), task.getExecutionNameSpace());
+ && Objects.equals(getExecutionNameSpace(), task.getExecutionNameSpace())
+ && Objects.equals(getParentTaskId(), task.getParentTaskId());
}
@Override
@@ -1005,6 +1015,7 @@ public int hashCode() {
getExternalInputPayloadStoragePath(),
getExternalOutputPayloadStoragePath(),
getIsolationGroupId(),
- getExecutionNameSpace());
+ getExecutionNameSpace(),
+ getParentTaskId());
}
}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java
index f6d5964d7..7e4357604 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/tasks/TaskDef.java
@@ -23,10 +23,10 @@
import com.netflix.conductor.annotations.protogen.ProtoMessage;
import com.netflix.conductor.common.constraints.OwnerEmailMandatoryConstraint;
import com.netflix.conductor.common.constraints.TaskTimeoutConstraint;
-import com.netflix.conductor.common.metadata.BaseDef;
+import com.netflix.conductor.common.metadata.Auditable;
+import com.netflix.conductor.common.metadata.SchemaDef;
import jakarta.validation.Valid;
-import jakarta.validation.constraints.Email;
import jakarta.validation.constraints.Min;
import jakarta.validation.constraints.NotEmpty;
import jakarta.validation.constraints.NotNull;
@@ -34,7 +34,7 @@
@ProtoMessage
@TaskTimeoutConstraint
@Valid
-public class TaskDef extends BaseDef {
+public class TaskDef extends Auditable {
@ProtoEnum
public enum TimeoutPolicy {
@@ -114,7 +114,6 @@ public enum RetryLogic {
@ProtoField(id = 18)
@OwnerEmailMandatoryConstraint
- @Email(message = "ownerEmail should be valid email address")
private String ownerEmail;
@ProtoField(id = 19)
@@ -125,6 +124,13 @@ public enum RetryLogic {
@Min(value = 1, message = "Backoff scale factor. Applicable for LINEAR_BACKOFF")
private Integer backoffScaleFactor = 1;
+ @ProtoField(id = 21)
+ private String baseType;
+
+ private SchemaDef inputSchema;
+ private SchemaDef outputSchema;
+ private boolean enforceSchema;
+
public TaskDef() {}
public TaskDef(String name) {
@@ -426,6 +432,38 @@ public Integer getBackoffScaleFactor() {
return backoffScaleFactor;
}
+ public String getBaseType() {
+ return baseType;
+ }
+
+ public void setBaseType(String baseType) {
+ this.baseType = baseType;
+ }
+
+ public SchemaDef getInputSchema() {
+ return inputSchema;
+ }
+
+ public void setInputSchema(SchemaDef inputSchema) {
+ this.inputSchema = inputSchema;
+ }
+
+ public SchemaDef getOutputSchema() {
+ return outputSchema;
+ }
+
+ public void setOutputSchema(SchemaDef outputSchema) {
+ this.outputSchema = outputSchema;
+ }
+
+ public boolean isEnforceSchema() {
+ return enforceSchema;
+ }
+
+ public void setEnforceSchema(boolean enforceSchema) {
+ this.enforceSchema = enforceSchema;
+ }
+
@Override
public String toString() {
return name;
@@ -456,7 +494,10 @@ && getRetryLogic() == taskDef.getRetryLogic()
&& Objects.equals(getInputTemplate(), taskDef.getInputTemplate())
&& Objects.equals(getIsolationGroupId(), taskDef.getIsolationGroupId())
&& Objects.equals(getExecutionNameSpace(), taskDef.getExecutionNameSpace())
- && Objects.equals(getOwnerEmail(), taskDef.getOwnerEmail());
+ && Objects.equals(getOwnerEmail(), taskDef.getOwnerEmail())
+ && Objects.equals(getBaseType(), taskDef.getBaseType())
+ && Objects.equals(getInputSchema(), taskDef.getInputSchema())
+ && Objects.equals(getOutputSchema(), taskDef.getOutputSchema());
}
@Override
@@ -479,6 +520,9 @@ public int hashCode() {
getInputTemplate(),
getIsolationGroupId(),
getExecutionNameSpace(),
- getOwnerEmail());
+ getOwnerEmail(),
+ getBaseType(),
+ getInputSchema(),
+ getOutputSchema());
}
}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/IdempotencyStrategy.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/IdempotencyStrategy.java
new file mode 100644
index 000000000..4b9ebd60b
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/IdempotencyStrategy.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2020 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.common.metadata.workflow;
+
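+/**
+ * Strategy applied when a workflow is started with an idempotency key that was already
+ * used: FAIL rejects the new request, while RETURN_EXISTING returns the execution that
+ * was previously started with the same key (assumed semantics).
+ */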
+public enum IdempotencyStrategy {
+ FAIL,
+ RETURN_EXISTING
+}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/RateLimitConfig.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/RateLimitConfig.java
new file mode 100644
index 000000000..966880f68
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/RateLimitConfig.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2023 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.common.metadata.workflow;
+
+import com.netflix.conductor.annotations.protogen.ProtoField;
+import com.netflix.conductor.annotations.protogen.ProtoMessage;
+
+/** Rate limit configuration for workflows */
+@ProtoMessage
+public class RateLimitConfig {
+ /**
+     * Key that defines the rate limit. The rate limit key is derived from workflow fields
+     * such as the name or correlationId.
+ */
+ @ProtoField(id = 1)
+ private String rateLimitKey;
+
+ /** Number of concurrently running workflows that are allowed per key */
+ @ProtoField(id = 2)
+ private int concurrentExecLimit;
+
+ public String getRateLimitKey() {
+ return rateLimitKey;
+ }
+
+ public void setRateLimitKey(String rateLimitKey) {
+ this.rateLimitKey = rateLimitKey;
+ }
+
+ public int getConcurrentExecLimit() {
+ return concurrentExecLimit;
+ }
+
+ public void setConcurrentExecLimit(int concurrentExecLimit) {
+ this.concurrentExecLimit = concurrentExecLimit;
+ }
+}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java
index fc8f83af7..9d76533d6 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StartWorkflowRequest.java
@@ -54,6 +54,29 @@ public class StartWorkflowRequest {
@Max(value = 99, message = "priority: ${validatedValue} should be maximum {value}")
private Integer priority = 0;
+ @ProtoField(id = 9)
+ private String createdBy;
+
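+    // Optional client-supplied key; combined with the strategy below, it controls what
+    // happens when a workflow with the same key is started more than once.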
+ private String idempotencyKey;
+
+ private IdempotencyStrategy idempotencyStrategy;
+
+ public String getIdempotencyKey() {
+ return idempotencyKey;
+ }
+
+ public void setIdempotencyKey(String idempotencyKey) {
+ this.idempotencyKey = idempotencyKey;
+ }
+
+ public IdempotencyStrategy getIdempotencyStrategy() {
+ return idempotencyStrategy;
+ }
+
+ public void setIdempotencyStrategy(IdempotencyStrategy idempotencyStrategy) {
+ this.idempotencyStrategy = idempotencyStrategy;
+ }
+
public String getName() {
return name;
}
@@ -158,4 +181,17 @@ public StartWorkflowRequest withWorkflowDef(WorkflowDef workflowDef) {
this.workflowDef = workflowDef;
return this;
}
+
+ public String getCreatedBy() {
+ return createdBy;
+ }
+
+ public void setCreatedBy(String createdBy) {
+ this.createdBy = createdBy;
+ }
+
+ public StartWorkflowRequest withCreatedBy(String createdBy) {
+ this.createdBy = createdBy;
+ return this;
+ }
}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StateChangeEvent.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StateChangeEvent.java
new file mode 100644
index 000000000..fc0275a5e
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/StateChangeEvent.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.common.metadata.workflow;
+
+import java.util.Map;
+
+import com.netflix.conductor.annotations.protogen.ProtoField;
+import com.netflix.conductor.annotations.protogen.ProtoMessage;
+
+import jakarta.validation.Valid;
+import jakarta.validation.constraints.NotNull;
+
+@Valid
+@ProtoMessage
+public class StateChangeEvent {
+
+ @ProtoField(id = 1)
+ @NotNull
+ private String type;
+
+ @ProtoField(id = 2)
+    private Map<String, Object> payload;
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+    public Map<String, Object> getPayload() {
+ return payload;
+ }
+
+    public void setPayload(Map<String, Object> payload) {
+ this.payload = payload;
+ }
+
+ @Override
+ public String toString() {
+ return "StateChangeEvent{" + "type='" + type + '\'' + ", payload=" + payload + '}';
+ }
+}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java
index d2fbb6f3b..66040b593 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/SubWorkflowParams.java
@@ -12,23 +12,22 @@
*/
package com.netflix.conductor.common.metadata.workflow;
+import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import com.netflix.conductor.annotations.protogen.ProtoField;
import com.netflix.conductor.annotations.protogen.ProtoMessage;
+import com.netflix.conductor.common.utils.TaskUtils;
import com.fasterxml.jackson.annotation.JsonGetter;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonSetter;
-import jakarta.validation.constraints.NotEmpty;
-import jakarta.validation.constraints.NotNull;
@ProtoMessage
public class SubWorkflowParams {
@ProtoField(id = 1)
- @NotNull(message = "SubWorkflowParams name cannot be null")
- @NotEmpty(message = "SubWorkflowParams name cannot be empty")
private String name;
@ProtoField(id = 2)
@@ -42,15 +41,36 @@ public class SubWorkflowParams {
@ProtoField(id = 4)
private Object workflowDefinition;
+ private String idempotencyKey;
+
+ private IdempotencyStrategy idempotencyStrategy;
+
+ public String getIdempotencyKey() {
+ return idempotencyKey;
+ }
+
+ public void setIdempotencyKey(String idempotencyKey) {
+ this.idempotencyKey = idempotencyKey;
+ }
+
+ public IdempotencyStrategy getIdempotencyStrategy() {
+ return idempotencyStrategy;
+ }
+
+ public void setIdempotencyStrategy(IdempotencyStrategy idempotencyStrategy) {
+ this.idempotencyStrategy = idempotencyStrategy;
+ }
+
/**
* @return the name
*/
public String getName() {
if (workflowDefinition != null) {
- return getWorkflowDef().getName();
- } else {
- return name;
+ if (workflowDefinition instanceof WorkflowDef) {
+ return ((WorkflowDef) workflowDefinition).getName();
+ }
}
+ return name;
}
/**
@@ -65,10 +85,11 @@ public void setName(String name) {
*/
public Integer getVersion() {
if (workflowDefinition != null) {
- return getWorkflowDef().getVersion();
- } else {
- return version;
+ if (workflowDefinition instanceof WorkflowDef) {
+ return ((WorkflowDef) workflowDefinition).getVersion();
+ }
}
+ return version;
}
/**
@@ -95,14 +116,19 @@ public void setTaskToDomain(Map taskToDomain) {
/**
* @return the workflowDefinition as an Object
*/
+ @JsonGetter("workflowDefinition")
public Object getWorkflowDefinition() {
return workflowDefinition;
}
- /**
- * @return the workflowDefinition as a WorkflowDef
- */
- @JsonGetter("workflowDefinition")
+ @Deprecated
+ @JsonIgnore
+ public void setWorkflowDef(WorkflowDef workflowDef) {
+ this.setWorkflowDefinition(workflowDef);
+ }
+
+ @Deprecated
+ @JsonIgnore
public WorkflowDef getWorkflowDef() {
return (WorkflowDef) workflowDefinition;
}
@@ -110,20 +136,26 @@ public WorkflowDef getWorkflowDef() {
/**
* @param workflowDef the workflowDefinition to set
*/
+ @JsonSetter("workflowDefinition")
public void setWorkflowDefinition(Object workflowDef) {
- if (!(workflowDef == null || workflowDef instanceof WorkflowDef)) {
+ if (workflowDef == null) {
+ this.workflowDefinition = workflowDef;
+ } else if (workflowDef instanceof WorkflowDef) {
+ this.workflowDefinition = workflowDef;
+ } else if (workflowDef instanceof String) {
+ if (!(((String) workflowDef).startsWith("${"))
+ || !(((String) workflowDef).endsWith("}"))) {
+ throw new IllegalArgumentException(
+ "workflowDefinition is a string, but not a valid DSL string");
+ } else {
+ this.workflowDefinition = workflowDef;
+ }
+ } else if (workflowDef instanceof LinkedHashMap) {
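+            // JSON deserialization yields a raw map here; convert it back into a WorkflowDef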
+ this.workflowDefinition = TaskUtils.convertToWorkflowDef(workflowDef);
+ } else {
throw new IllegalArgumentException(
- "workflowDefinition must be either null or WorkflowDef");
+ "workflowDefinition must be either null, or WorkflowDef, or a valid DSL string");
}
- this.workflowDefinition = workflowDef;
- }
-
- /**
- * @param workflowDef the workflowDefinition to set
- */
- @JsonSetter("workflowDefinition")
- public void setWorkflowDef(WorkflowDef workflowDef) {
- this.workflowDefinition = workflowDef;
}
@Override
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/UpgradeWorkflowRequest.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/UpgradeWorkflowRequest.java
new file mode 100644
index 000000000..a33b16874
--- /dev/null
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/UpgradeWorkflowRequest.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2023 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.common.metadata.workflow;
+
+import java.util.Map;
+
+import com.netflix.conductor.annotations.protogen.ProtoField;
+import com.netflix.conductor.annotations.protogen.ProtoMessage;
+
+import jakarta.validation.constraints.NotNull;
+
+@ProtoMessage
+public class UpgradeWorkflowRequest {
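+    // Upgrades a running workflow to another definition version: 'name' and 'version'
+    // select the target, while 'workflowInput' and 'taskOutput' provide values used
+    // during the transition (assumed semantics).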
+
+    public Map<String, Object> getTaskOutput() {
+        return taskOutput;
+    }
+
+    public void setTaskOutput(Map<String, Object> taskOutput) {
+        this.taskOutput = taskOutput;
+    }
+
+    public Map<String, Object> getWorkflowInput() {
+        return workflowInput;
+    }
+
+    public void setWorkflowInput(Map<String, Object> workflowInput) {
+        this.workflowInput = workflowInput;
+    }
+
+    @ProtoField(id = 4)
+    private Map<String, Object> taskOutput;
+
+    @ProtoField(id = 3)
+    private Map<String, Object> workflowInput;
+
+ @ProtoField(id = 2)
+ private Integer version;
+
+ @NotNull(message = "Workflow name cannot be null or empty")
+ @ProtoField(id = 1)
+ private String name;
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public Integer getVersion() {
+ return version;
+ }
+
+ public void setVersion(Integer version) {
+ this.version = version;
+ }
+}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java
index 02c4d0149..2569294b8 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowDef.java
@@ -12,12 +12,7 @@
*/
package com.netflix.conductor.common.metadata.workflow;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
+import java.util.*;
import com.netflix.conductor.annotations.protogen.ProtoEnum;
import com.netflix.conductor.annotations.protogen.ProtoField;
@@ -25,19 +20,16 @@
import com.netflix.conductor.common.constraints.NoSemiColonConstraint;
import com.netflix.conductor.common.constraints.OwnerEmailMandatoryConstraint;
import com.netflix.conductor.common.constraints.TaskReferenceNameUniqueConstraint;
-import com.netflix.conductor.common.metadata.BaseDef;
+import com.netflix.conductor.common.metadata.Auditable;
+import com.netflix.conductor.common.metadata.SchemaDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
-import jakarta.validation.Valid;
-import jakarta.validation.constraints.Email;
-import jakarta.validation.constraints.Max;
-import jakarta.validation.constraints.Min;
-import jakarta.validation.constraints.NotEmpty;
-import jakarta.validation.constraints.NotNull;
+import jakarta.validation.*;
+import jakarta.validation.constraints.*;
@ProtoMessage
@TaskReferenceNameUniqueConstraint
-public class WorkflowDef extends BaseDef {
+public class WorkflowDef extends Auditable {
@ProtoEnum
public enum TimeoutPolicy {
@@ -76,7 +68,7 @@ public enum TimeoutPolicy {
@Max(value = 2, message = "workflowDef schemaVersion: {value} is only supported")
private int schemaVersion = 2;
- // By default, a workflow is restartable
+ // By default a workflow is restartable
@ProtoField(id = 9)
private boolean restartable = true;
@@ -85,7 +77,6 @@ public enum TimeoutPolicy {
@ProtoField(id = 11)
@OwnerEmailMandatoryConstraint
- @Email(message = "ownerEmail should be valid email address")
private String ownerEmail;
@ProtoField(id = 12)
@@ -101,6 +92,29 @@ public enum TimeoutPolicy {
@ProtoField(id = 15)
private Map<String, Object> inputTemplate = new HashMap<>();
+ @ProtoField(id = 17)
+ private String workflowStatusListenerSink;
+
+ @ProtoField(id = 18)
+ private RateLimitConfig rateLimitConfig;
+
+ @ProtoField(id = 19)
+ private SchemaDef inputSchema;
+
+ @ProtoField(id = 20)
+ private SchemaDef outputSchema;
+
+ @ProtoField(id = 21)
+ private boolean enforceSchema = true;
+
+ public boolean isEnforceSchema() {
+ return enforceSchema;
+ }
+
+ public void setEnforceSchema(boolean enforceSchema) {
+ this.enforceSchema = enforceSchema;
+ }
+
/**
* @return the name
*/
@@ -321,6 +335,38 @@ public static String getKey(String name, int version) {
return name + "." + version;
}
+ public String getWorkflowStatusListenerSink() {
+ return workflowStatusListenerSink;
+ }
+
+ public void setWorkflowStatusListenerSink(String workflowStatusListenerSink) {
+ this.workflowStatusListenerSink = workflowStatusListenerSink;
+ }
+
+ public RateLimitConfig getRateLimitConfig() {
+ return rateLimitConfig;
+ }
+
+ public void setRateLimitConfig(RateLimitConfig rateLimitConfig) {
+ this.rateLimitConfig = rateLimitConfig;
+ }
+
+ public SchemaDef getInputSchema() {
+ return inputSchema;
+ }
+
+ public void setInputSchema(SchemaDef inputSchema) {
+ this.inputSchema = inputSchema;
+ }
+
+ public SchemaDef getOutputSchema() {
+ return outputSchema;
+ }
+
+ public void setOutputSchema(SchemaDef outputSchema) {
+ this.outputSchema = outputSchema;
+ }
+
public boolean containsType(String taskType) {
return collectTasks().stream().anyMatch(t -> t.getType().equals(taskType));
}
@@ -384,31 +430,12 @@ public boolean equals(Object o) {
return false;
}
WorkflowDef that = (WorkflowDef) o;
- return getVersion() == that.getVersion()
- && getSchemaVersion() == that.getSchemaVersion()
- && Objects.equals(getName(), that.getName())
- && Objects.equals(getDescription(), that.getDescription())
- && Objects.equals(getTasks(), that.getTasks())
- && Objects.equals(getInputParameters(), that.getInputParameters())
- && Objects.equals(getOutputParameters(), that.getOutputParameters())
- && Objects.equals(getFailureWorkflow(), that.getFailureWorkflow())
- && Objects.equals(getOwnerEmail(), that.getOwnerEmail())
- && Objects.equals(getTimeoutSeconds(), that.getTimeoutSeconds());
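+        // A workflow definition is uniquely identified by (name, version); equals and
+        // hashCode now compare only that key.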
+ return version == that.version && Objects.equals(name, that.name);
}
@Override
public int hashCode() {
- return Objects.hash(
- getName(),
- getDescription(),
- getVersion(),
- getTasks(),
- getInputParameters(),
- getOutputParameters(),
- getFailureWorkflow(),
- getSchemaVersion(),
- getOwnerEmail(),
- getTimeoutSeconds());
+ return Objects.hash(name, version);
}
@Override
@@ -437,8 +464,28 @@ public String toString() {
+ restartable
+ ", workflowStatusListenerEnabled="
+ workflowStatusListenerEnabled
+ + ", ownerEmail='"
+ + ownerEmail
+ + '\''
+ + ", timeoutPolicy="
+ + timeoutPolicy
+ ", timeoutSeconds="
+ timeoutSeconds
+ + ", variables="
+ + variables
+ + ", inputTemplate="
+ + inputTemplate
+ + ", workflowStatusListenerSink='"
+ + workflowStatusListenerSink
+ + '\''
+ + ", rateLimitConfig="
+ + rateLimitConfig
+ + ", inputSchema="
+ + inputSchema
+ + ", outputSchema="
+ + outputSchema
+ + ", enforceSchema="
+ + enforceSchema
+ '}';
}
}
diff --git a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java
index b0734ce26..2e42e7319 100644
--- a/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java
+++ b/common/src/main/java/com/netflix/conductor/common/metadata/workflow/WorkflowTask.java
@@ -26,10 +26,10 @@
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
-import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonGetter;
+import com.fasterxml.jackson.annotation.JsonSetter;
import jakarta.validation.Valid;
-import jakarta.validation.constraints.NotEmpty;
-import jakarta.validation.constraints.PositiveOrZero;
+import jakarta.validation.constraints.*;
/**
* This is the task definition defined as part of the {@link WorkflowDef}. The tasks defined in
@@ -38,6 +38,32 @@
@ProtoMessage
public class WorkflowTask {
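+    /**
+     * Optional caching of task output: when set, output is stored under 'key' and reused
+     * for up to ttlInSecond seconds instead of re-executing the task (assumed semantics).
+     */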
+ @ProtoMessage
+ public static class CacheConfig {
+
+ @ProtoField(id = 1)
+ private String key;
+
+ @ProtoField(id = 2)
+ private int ttlInSecond;
+
+ public String getKey() {
+ return key;
+ }
+
+ public void setKey(String key) {
+ this.key = key;
+ }
+
+ public int getTtlInSecond() {
+ return ttlInSecond;
+ }
+
+ public void setTtlInSecond(int ttlInSecond) {
+ this.ttlInSecond = ttlInSecond;
+ }
+ }
+
@ProtoField(id = 1)
@NotEmpty(message = "WorkflowTask name cannot be empty or null")
private String name;
@@ -86,7 +112,6 @@ public void setTasks(List tasks) {
// Populates for the tasks of the decision type
@ProtoField(id = 9)
- @JsonInclude(JsonInclude.Include.NON_EMPTY)
private Map<String, List<@Valid WorkflowTask>> decisionCases = new LinkedHashMap<>();
@Deprecated private String dynamicForkJoinTasksParam;
@@ -98,11 +123,9 @@ public void setTasks(List tasks) {
private String dynamicForkTasksInputParamName;
@ProtoField(id = 12)
- @JsonInclude(JsonInclude.Include.NON_EMPTY)
private List<@Valid WorkflowTask> defaultCase = new LinkedList<>();
@ProtoField(id = 13)
- @JsonInclude(JsonInclude.Include.NON_EMPTY)
private List<@Valid List<@Valid WorkflowTask>> forkTasks = new LinkedList<>();
@ProtoField(id = 14)
@@ -114,7 +137,6 @@ public void setTasks(List tasks) {
private SubWorkflowParams subWorkflowParam;
@ProtoField(id = 16)
- @JsonInclude(JsonInclude.Include.NON_EMPTY)
private List<String> joinOn = new LinkedList<>();
@ProtoField(id = 17)
@@ -130,7 +152,6 @@ public void setTasks(List tasks) {
private Boolean rateLimited;
@ProtoField(id = 21)
- @JsonInclude(JsonInclude.Include.NON_EMPTY)
private List<String> defaultExclusiveJoinTask = new LinkedList<>();
@ProtoField(id = 23)
@@ -140,7 +161,6 @@ public void setTasks(List tasks) {
private String loopCondition;
@ProtoField(id = 25)
- @JsonInclude(JsonInclude.Include.NON_EMPTY)
private List<WorkflowTask> loopOver = new LinkedList<>();
@ProtoField(id = 26)
@@ -152,8 +172,21 @@ public void setTasks(List tasks) {
@ProtoField(id = 28)
private String expression;
- @ProtoField(id = 29)
- private boolean permissive = false;
+ /*
+    Map of events to be emitted when the task status changes.
+    The key can be comma-separated values of the status changes, prefixed with "on".
+ */
+ // @ProtoField(id = 29)
+    private @Valid Map<String, List<StateChangeEvent>> onStateChange = new HashMap<>();
+
+ @ProtoField(id = 30)
+ private String joinStatus;
+
+ @ProtoField(id = 31)
+ private CacheConfig cacheConfig;
+
+ @ProtoField(id = 32)
+ private boolean permissive;
/**
* @return the name
@@ -390,9 +423,18 @@ public void setScriptExpression(String expression) {
this.scriptExpression = expression;
}
+ public CacheConfig getCacheConfig() {
+ return cacheConfig;
+ }
+
+ public void setCacheConfig(CacheConfig cacheConfig) {
+ this.cacheConfig = cacheConfig;
+ }
+
/**
* @return the subWorkflow
*/
+ @JsonGetter
public SubWorkflowParams getSubWorkflowParam() {
return subWorkflowParam;
}
@@ -400,6 +442,7 @@ public SubWorkflowParams getSubWorkflowParam() {
/**
* @param subWorkflow the subWorkflowParam to set
*/
+ @JsonSetter
public void setSubWorkflowParam(SubWorkflowParams subWorkflow) {
this.subWorkflowParam = subWorkflow;
}
@@ -550,18 +593,18 @@ public void setExpression(String expression) {
this.expression = expression;
}
- /**
- * @return If the task is permissive. When set to true, and the task is in failed status,
- * fail-fast does not occur. The workflow execution continues until reaching join or end of
- * workflow, allowing idempotent execution of other tasks.
- */
+ public String getJoinStatus() {
+ return joinStatus;
+ }
+
+ public void setJoinStatus(String joinStatus) {
+ this.joinStatus = joinStatus;
+ }
+
public boolean isPermissive() {
- return this.permissive;
+ return permissive;
}
- /**
- * @param permissive when set to true, the task is marked as permissive
- */
public void setPermissive(boolean permissive) {
this.permissive = permissive;
}
@@ -713,6 +756,14 @@ public WorkflowTask get(String taskReferenceName) {
return null;
}
+ public Map<String, List<StateChangeEvent>> getOnStateChange() {
+ return onStateChange;
+ }
+
+ public void setOnStateChange(Map<String, List<StateChangeEvent>> onStateChange) {
+ this.onStateChange = onStateChange;
+ }
+
@Override
public String toString() {
return name + "/" + taskReferenceName;
@@ -727,62 +778,12 @@ public boolean equals(Object o) {
return false;
}
WorkflowTask that = (WorkflowTask) o;
- return getStartDelay() == that.getStartDelay()
- && isOptional() == that.isOptional()
- && Objects.equals(getName(), that.getName())
- && Objects.equals(getTaskReferenceName(), that.getTaskReferenceName())
- && Objects.equals(getDescription(), that.getDescription())
- && Objects.equals(getInputParameters(), that.getInputParameters())
- && Objects.equals(getType(), that.getType())
- && Objects.equals(getDynamicTaskNameParam(), that.getDynamicTaskNameParam())
- && Objects.equals(getCaseValueParam(), that.getCaseValueParam())
- && Objects.equals(getEvaluatorType(), that.getEvaluatorType())
- && Objects.equals(getExpression(), that.getExpression())
- && Objects.equals(getCaseExpression(), that.getCaseExpression())
- && Objects.equals(getDecisionCases(), that.getDecisionCases())
- && Objects.equals(
- getDynamicForkJoinTasksParam(), that.getDynamicForkJoinTasksParam())
- && Objects.equals(getDynamicForkTasksParam(), that.getDynamicForkTasksParam())
- && Objects.equals(
- getDynamicForkTasksInputParamName(),
- that.getDynamicForkTasksInputParamName())
- && Objects.equals(getDefaultCase(), that.getDefaultCase())
- && Objects.equals(getForkTasks(), that.getForkTasks())
- && Objects.equals(getSubWorkflowParam(), that.getSubWorkflowParam())
- && Objects.equals(getJoinOn(), that.getJoinOn())
- && Objects.equals(getSink(), that.getSink())
- && Objects.equals(isAsyncComplete(), that.isAsyncComplete())
- && Objects.equals(getDefaultExclusiveJoinTask(), that.getDefaultExclusiveJoinTask())
- && Objects.equals(getRetryCount(), that.getRetryCount());
+ return Objects.equals(name, that.name)
+ && Objects.equals(taskReferenceName, that.taskReferenceName);
}
@Override
public int hashCode() {
-
- return Objects.hash(
- getName(),
- getTaskReferenceName(),
- getDescription(),
- getInputParameters(),
- getType(),
- getDynamicTaskNameParam(),
- getCaseValueParam(),
- getCaseExpression(),
- getEvaluatorType(),
- getExpression(),
- getDecisionCases(),
- getDynamicForkJoinTasksParam(),
- getDynamicForkTasksParam(),
- getDynamicForkTasksInputParamName(),
- getDefaultCase(),
- getForkTasks(),
- getStartDelay(),
- getSubWorkflowParam(),
- getJoinOn(),
- getSink(),
- isAsyncComplete(),
- isOptional(),
- getDefaultExclusiveJoinTask(),
- getRetryCount());
+ return Objects.hash(name, taskReferenceName);
}
}
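The `WorkflowTask` changes above introduce a nested `CacheConfig` (a per-task cache key plus TTL in seconds) and narrow `equals`/`hashCode` to the task name and reference name. Below is a minimal sketch of wiring the cache config through the setters shown in the diff; the key value is a hypothetical placeholder, not a documented expression syntax:

```java
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;

public class CacheConfigExample {
    public static void main(String[] args) {
        WorkflowTask task = new WorkflowTask();
        task.setName("fetch_data");
        task.setTaskReferenceName("fetch_data_ref");

        // Cache this task's output under a fixed key for up to one hour.
        WorkflowTask.CacheConfig cacheConfig = new WorkflowTask.CacheConfig();
        cacheConfig.setKey("fetch_data_cache"); // hypothetical key value
        cacheConfig.setTtlInSecond(3600);
        task.setCacheConfig(cacheConfig);

        // toString() prints name/taskReferenceName
        System.out.println(task);
    }
}
```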
diff --git a/common/src/main/java/com/netflix/conductor/common/run/Workflow.java b/common/src/main/java/com/netflix/conductor/common/run/Workflow.java
index 26a8b5598..866d01a5e 100644
--- a/common/src/main/java/com/netflix/conductor/common/run/Workflow.java
+++ b/common/src/main/java/com/netflix/conductor/common/run/Workflow.java
@@ -126,8 +126,47 @@ public boolean isSuccessful() {
@ProtoField(id = 25)
private Set<String> failedTaskNames = new HashSet<>();
+ @ProtoField(id = 26)
+ private List<Workflow> history = new LinkedList<>();
+
+ private String idempotencyKey;
+ private String rateLimitKey;
+ private boolean rateLimited;
+
public Workflow() {}
+ public String getIdempotencyKey() {
+ return idempotencyKey;
+ }
+
+ public void setIdempotencyKey(String idempotencyKey) {
+ this.idempotencyKey = idempotencyKey;
+ }
+
+ public String getRateLimitKey() {
+ return rateLimitKey;
+ }
+
+ public void setRateLimitKey(String rateLimitKey) {
+ this.rateLimitKey = rateLimitKey;
+ }
+
+ public boolean isRateLimited() {
+ return rateLimited;
+ }
+
+ public void setRateLimited(boolean rateLimited) {
+ this.rateLimited = rateLimited;
+ }
+
+ public List<Workflow> getHistory() {
+ return history;
+ }
+
+ public void setHistory(List<Workflow> history) {
+ this.history = history;
+ }
+
/**
* @return the status
*/
@@ -326,14 +365,6 @@ public void setFailedReferenceTaskNames(Set<String> failedReferenceTaskNames) {
this.failedReferenceTaskNames = failedReferenceTaskNames;
}
- public Set<String> getFailedTaskNames() {
- return failedTaskNames;
- }
-
- public void setFailedTaskNames(Set<String> failedTaskNames) {
- this.failedTaskNames = failedTaskNames;
- }
-
public WorkflowDef getWorkflowDefinition() {
return workflowDefinition;
}
@@ -447,6 +478,14 @@ public boolean hasParent() {
return StringUtils.isNotEmpty(parentWorkflowId);
}
+ public Set<String> getFailedTaskNames() {
+ return failedTaskNames;
+ }
+
+ public void setFailedTaskNames(Set<String> failedTaskNames) {
+ this.failedTaskNames = failedTaskNames;
+ }
+
public Task getTaskByRefName(String refName) {
if (refName == null) {
throw new RuntimeException(
@@ -495,7 +534,6 @@ public Workflow copy() {
copy.setLastRetriedTime(lastRetriedTime);
copy.setTaskToDomain(taskToDomain);
copy.setFailedReferenceTaskNames(failedReferenceTaskNames);
- copy.setFailedTaskNames(failedTaskNames);
copy.setExternalInputPayloadStoragePath(externalInputPayloadStoragePath);
copy.setExternalOutputPayloadStoragePath(externalOutputPayloadStoragePath);
return copy;
@@ -527,61 +565,11 @@ public boolean equals(Object o) {
return false;
}
Workflow workflow = (Workflow) o;
- return getEndTime() == workflow.getEndTime()
- && getWorkflowVersion() == workflow.getWorkflowVersion()
- && getStatus() == workflow.getStatus()
- && Objects.equals(getWorkflowId(), workflow.getWorkflowId())
- && Objects.equals(getParentWorkflowId(), workflow.getParentWorkflowId())
- && Objects.equals(getParentWorkflowTaskId(), workflow.getParentWorkflowTaskId())
- && Objects.equals(getTasks(), workflow.getTasks())
- && Objects.equals(getInput(), workflow.getInput())
- && Objects.equals(getOutput(), workflow.getOutput())
- && Objects.equals(getWorkflowName(), workflow.getWorkflowName())
- && Objects.equals(getCorrelationId(), workflow.getCorrelationId())
- && Objects.equals(getReRunFromWorkflowId(), workflow.getReRunFromWorkflowId())
- && Objects.equals(getReasonForIncompletion(), workflow.getReasonForIncompletion())
- && Objects.equals(getEvent(), workflow.getEvent())
- && Objects.equals(getTaskToDomain(), workflow.getTaskToDomain())
- && Objects.equals(
- getFailedReferenceTaskNames(), workflow.getFailedReferenceTaskNames())
- && Objects.equals(getFailedTaskNames(), workflow.getFailedTaskNames())
- && Objects.equals(
- getExternalInputPayloadStoragePath(),
- workflow.getExternalInputPayloadStoragePath())
- && Objects.equals(
- getExternalOutputPayloadStoragePath(),
- workflow.getExternalOutputPayloadStoragePath())
- && Objects.equals(getPriority(), workflow.getPriority())
- && Objects.equals(getWorkflowDefinition(), workflow.getWorkflowDefinition())
- && Objects.equals(getVariables(), workflow.getVariables())
- && Objects.equals(getLastRetriedTime(), workflow.getLastRetriedTime());
+ return Objects.equals(getWorkflowId(), workflow.getWorkflowId());
}
@Override
public int hashCode() {
- return Objects.hash(
- getStatus(),
- getEndTime(),
- getWorkflowId(),
- getParentWorkflowId(),
- getParentWorkflowTaskId(),
- getTasks(),
- getInput(),
- getOutput(),
- getWorkflowName(),
- getWorkflowVersion(),
- getCorrelationId(),
- getReRunFromWorkflowId(),
- getReasonForIncompletion(),
- getEvent(),
- getTaskToDomain(),
- getFailedReferenceTaskNames(),
- getFailedTaskNames(),
- getWorkflowDefinition(),
- getExternalInputPayloadStoragePath(),
- getExternalOutputPayloadStoragePath(),
- getPriority(),
- getVariables(),
- getLastRetriedTime());
+ return Objects.hash(getWorkflowId());
}
}
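Note that `Workflow#equals` and `hashCode` are now keyed on the workflow id alone, so two snapshots of the same execution compare equal even when their other fields have diverged. A small sketch of the new semantics:

```java
import com.netflix.conductor.common.run.Workflow;

public class WorkflowEqualityExample {
    public static void main(String[] args) {
        Workflow earlier = new Workflow();
        earlier.setWorkflowId("wf-123");

        Workflow later = new Workflow();
        later.setWorkflowId("wf-123");
        later.setCorrelationId("retry-42"); // diverging state no longer affects equality

        System.out.println(earlier.equals(later)); // true: identity is the workflow id
    }
}
```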
diff --git a/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java b/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java
index 9be8d7df1..c41a8f69c 100644
--- a/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java
+++ b/common/src/main/java/com/netflix/conductor/common/run/WorkflowSummary.java
@@ -88,6 +88,9 @@ public class WorkflowSummary {
@ProtoField(id = 18)
private Set<String> failedTaskNames = new HashSet<>();
+ @ProtoField(id = 19)
+ private String createdBy;
+
public WorkflowSummary() {}
public WorkflowSummary(Workflow workflow) {
@@ -346,6 +349,14 @@ public void setPriority(int priority) {
this.priority = priority;
}
+ public String getCreatedBy() {
+ return createdBy;
+ }
+
+ public void setCreatedBy(String createdBy) {
+ this.createdBy = createdBy;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -366,7 +377,8 @@ && getWorkflowId().equals(that.getWorkflowId())
&& StringUtils.equals(getEndTime(), that.getEndTime())
&& getStatus() == that.getStatus()
&& Objects.equals(getReasonForIncompletion(), that.getReasonForIncompletion())
- && Objects.equals(getEvent(), that.getEvent());
+ && Objects.equals(getEvent(), that.getEvent())
+ && Objects.equals(getCreatedBy(), that.getCreatedBy());
}
@Override
@@ -383,6 +395,7 @@ public int hashCode() {
getReasonForIncompletion(),
getExecutionTime(),
getEvent(),
- getPriority());
+ getPriority(),
+ getCreatedBy());
}
}
diff --git a/common/src/main/java/com/netflix/conductor/common/utils/TaskUtils.java b/common/src/main/java/com/netflix/conductor/common/utils/TaskUtils.java
index 6ba1f11ba..7bb6ab7ff 100644
--- a/common/src/main/java/com/netflix/conductor/common/utils/TaskUtils.java
+++ b/common/src/main/java/com/netflix/conductor/common/utils/TaskUtils.java
@@ -12,8 +12,21 @@
*/
package com.netflix.conductor.common.utils;
+import com.netflix.conductor.common.config.ObjectMapperProvider;
+import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
public class TaskUtils {
+ private static final ObjectMapper objectMapper;
+
+ static {
+ ObjectMapperProvider provider = new ObjectMapperProvider();
+ objectMapper = provider.getObjectMapper();
+ }
+
private static final String LOOP_TASK_DELIMITER = "__";
public static String appendIteration(String name, int iteration) {
@@ -28,4 +41,8 @@ public static String removeIterationFromTaskRefName(String referenceTaskName) {
String[] tokens = referenceTaskName.split(TaskUtils.LOOP_TASK_DELIMITER);
return tokens.length > 0 ? tokens[0] : referenceTaskName;
}
+
+ public static WorkflowDef convertToWorkflowDef(Object workflowDef) {
+ return objectMapper.convertValue(workflowDef, new TypeReference<WorkflowDef>() {});
+ }
}
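`convertToWorkflowDef` is a convenience for re-typing an untyped definition (for example, a `Map` produced by a generic JSON parse) into a `WorkflowDef` via Jackson's `convertValue`. A minimal usage sketch, with a made-up map payload:

```java
import java.util.Map;

import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.utils.TaskUtils;

public class ConvertToWorkflowDefExample {
    public static void main(String[] args) {
        // Hypothetical untyped definition, e.g. the result of a generic JSON parse.
        Map<String, Object> raw = Map.of("name", "order_flow", "version", 1);

        WorkflowDef def = TaskUtils.convertToWorkflowDef(raw);
        System.out.println(def.getName()); // order_flow
    }
}
```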
diff --git a/common/src/main/java/com/netflix/conductor/common/validation/ErrorResponse.java b/common/src/main/java/com/netflix/conductor/common/validation/ErrorResponse.java
index f9183c928..a43a91197 100644
--- a/common/src/main/java/com/netflix/conductor/common/validation/ErrorResponse.java
+++ b/common/src/main/java/com/netflix/conductor/common/validation/ErrorResponse.java
@@ -13,6 +13,7 @@
package com.netflix.conductor.common.validation;
import java.util.List;
+import java.util.Map;
public class ErrorResponse {
@@ -23,6 +24,16 @@ public class ErrorResponse {
private boolean retryable;
private List<ValidationError> validationErrors;
+ private Map<String, Object> metadata;
+
+ public Map<String, Object> getMetadata() {
+ return metadata;
+ }
+
+ public void setMetadata(Map<String, Object> metadata) {
+ this.metadata = metadata;
+ }
+
public int getStatus() {
return status;
}
diff --git a/common/src/test/java/com/netflix/conductor/common/tasks/TaskDefTest.java b/common/src/test/java/com/netflix/conductor/common/tasks/TaskDefTest.java
index f370138fc..a46cf7d5c 100644
--- a/common/src/test/java/com/netflix/conductor/common/tasks/TaskDefTest.java
+++ b/common/src/test/java/com/netflix/conductor/common/tasks/TaskDefTest.java
@@ -74,24 +74,6 @@ public void testTaskDef() {
assertTrue(validationErrors.contains("ownerEmail cannot be empty"));
}
- @Test
- public void testTaskDefNameAndOwnerNotSet() {
- TaskDef taskDef = new TaskDef();
- taskDef.setRetryCount(-1);
- taskDef.setTimeoutSeconds(1000);
- taskDef.setResponseTimeoutSeconds(1);
-
- Set<ConstraintViolation<TaskDef>> result = validator.validate(taskDef);
- assertEquals(3, result.size());
-
- List<String> validationErrors = new ArrayList<>();
- result.forEach(e -> validationErrors.add(e.getMessage()));
-
- assertTrue(validationErrors.contains("TaskDef retryCount: 0 must be >= 0"));
- assertTrue(validationErrors.contains("TaskDef name cannot be null or empty"));
- assertTrue(validationErrors.contains("ownerEmail cannot be empty"));
- }
-
@Test
public void testTaskDefInvalidEmail() {
TaskDef taskDef = new TaskDef();
@@ -99,7 +81,6 @@ public void testTaskDefInvalidEmail() {
taskDef.setRetryCount(1);
taskDef.setTimeoutSeconds(1000);
taskDef.setResponseTimeoutSeconds(1);
- taskDef.setOwnerEmail("owner");
Set<ConstraintViolation<TaskDef>> result = validator.validate(taskDef);
assertEquals(1, result.size());
@@ -107,7 +88,9 @@ public void testTaskDefInvalidEmail() {
List<String> validationErrors = new ArrayList<>();
result.forEach(e -> validationErrors.add(e.getMessage()));
- assertTrue(validationErrors.contains("ownerEmail should be valid email address"));
+ assertTrue(
+ validationErrors.toString(),
+ validationErrors.contains("ownerEmail cannot be empty"));
}
@Test
diff --git a/common/src/test/java/com/netflix/conductor/common/tasks/TaskTest.java b/common/src/test/java/com/netflix/conductor/common/tasks/TaskTest.java
index 255108170..402fcfcb0 100644
--- a/common/src/test/java/com/netflix/conductor/common/tasks/TaskTest.java
+++ b/common/src/test/java/com/netflix/conductor/common/tasks/TaskTest.java
@@ -98,7 +98,7 @@ public void testDeepCopyTask() {
final Task task = new Task();
// In order to avoid forgetting putting inside the copy method the newly added fields check
// the number of declared fields.
- final int expectedTaskFieldsNumber = 40;
+ final int expectedTaskFieldsNumber = 41;
final int declaredFieldsNumber = task.getClass().getDeclaredFields().length;
assertEquals(expectedTaskFieldsNumber, declaredFieldsNumber);
diff --git a/common/src/test/java/com/netflix/conductor/common/workflow/SubWorkflowParamsTest.java b/common/src/test/java/com/netflix/conductor/common/workflow/SubWorkflowParamsTest.java
index d32afc5f6..5d9222d62 100644
--- a/common/src/test/java/com/netflix/conductor/common/workflow/SubWorkflowParamsTest.java
+++ b/common/src/test/java/com/netflix/conductor/common/workflow/SubWorkflowParamsTest.java
@@ -12,11 +12,9 @@
*/
package com.netflix.conductor.common.workflow;
-import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
-import java.util.Set;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -25,6 +23,8 @@
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
+import com.netflix.conductor.common.metadata.tasks.TaskDef;
+import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
@@ -32,13 +32,8 @@
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
-import jakarta.validation.ConstraintViolation;
-import jakarta.validation.Validation;
-import jakarta.validation.Validator;
-import jakarta.validation.ValidatorFactory;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
@@ -46,22 +41,6 @@ public class SubWorkflowParamsTest {
@Autowired private ObjectMapper objectMapper;
- @Test
- public void testWorkflowTaskName() {
- SubWorkflowParams subWorkflowParams = new SubWorkflowParams(); // name is null
- ValidatorFactory factory = Validation.buildDefaultValidatorFactory();
- Validator validator = factory.getValidator();
-
- Set<ConstraintViolation<SubWorkflowParams>> result = validator.validate(subWorkflowParams);
- assertEquals(2, result.size());
-
- List<String> validationErrors = new ArrayList<>();
- result.forEach(e -> validationErrors.add(e.getMessage()));
-
- assertTrue(validationErrors.contains("SubWorkflowParams name cannot be null"));
- assertTrue(validationErrors.contains("SubWorkflowParams name cannot be empty"));
- }
-
@Test
public void testWorkflowSetTaskToDomain() {
SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
@@ -91,7 +70,6 @@ public void testGetWorkflowDef() {
def.getTasks().add(task);
subWorkflowParams.setWorkflowDefinition(def);
assertEquals(def, subWorkflowParams.getWorkflowDefinition());
- assertEquals(def, subWorkflowParams.getWorkflowDef());
}
@Test
@@ -115,7 +93,41 @@ public void testWorkflowDefJson() throws Exception {
objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(subWorkflowParams);
SubWorkflowParams deserializedParams =
objectMapper.readValue(serializedParams, SubWorkflowParams.class);
- assertEquals(def, deserializedParams.getWorkflowDefinition());
- assertEquals(def, deserializedParams.getWorkflowDef());
+ var deserializedDef = (WorkflowDef) deserializedParams.getWorkflowDefinition();
+ assertEquals(def, deserializedDef);
+
+ var taskName = "taskName";
+ var subWorkflowName = "subwf";
+ TaskDef taskDef = new TaskDef(taskName);
+ taskDef.setRetryCount(0);
+ taskDef.setOwnerEmail("test@orkes.io");
+
+ WorkflowTask inline = new WorkflowTask();
+ inline.setTaskReferenceName(taskName);
+ inline.setName(taskName);
+ inline.setTaskDefinition(taskDef);
+ inline.setWorkflowTaskType(TaskType.SIMPLE);
+ inline.setInputParameters(Map.of("evaluatorType", "graaljs", "expression", "true;"));
+
+ WorkflowDef subworkflowDef = new WorkflowDef();
+ subworkflowDef.setName(subWorkflowName);
+ subworkflowDef.setOwnerEmail("test@orkes.io");
+ subworkflowDef.setInputParameters(Arrays.asList("value", "inlineValue"));
+ subworkflowDef.setDescription("Sub Workflow to test retry");
+ subworkflowDef.setTimeoutSeconds(600);
+ subworkflowDef.setTimeoutPolicy(WorkflowDef.TimeoutPolicy.TIME_OUT_WF);
+ subworkflowDef.setTasks(Arrays.asList(inline));
+
+ // autowired
+ var serializedSubWorkflowDef1 = objectMapper.writeValueAsString(subworkflowDef);
+ var deserializedSubWorkflowDef1 =
+ objectMapper.readValue(serializedSubWorkflowDef1, WorkflowDef.class);
+ assertEquals(deserializedSubWorkflowDef1, subworkflowDef);
+ // default
+ ObjectMapper mapper = new ObjectMapper();
+ var serializedSubWorkflowDef2 = mapper.writeValueAsString(subworkflowDef);
+ var deserializedSubWorkflowDef2 =
+ mapper.readValue(serializedSubWorkflowDef2, WorkflowDef.class);
+ assertEquals(deserializedSubWorkflowDef2, subworkflowDef);
}
}
diff --git a/common/src/test/java/com/netflix/conductor/common/workflow/WorkflowDefValidatorTest.java b/common/src/test/java/com/netflix/conductor/common/workflow/WorkflowDefValidatorTest.java
index d08acdc77..132e33d99 100644
--- a/common/src/test/java/com/netflix/conductor/common/workflow/WorkflowDefValidatorTest.java
+++ b/common/src/test/java/com/netflix/conductor/common/workflow/WorkflowDefValidatorTest.java
@@ -327,12 +327,7 @@ public void testWorkflowOwnerInvalidEmail() {
ValidatorFactory factory = Validation.buildDefaultValidatorFactory();
Validator validator = factory.getValidator();
Set<ConstraintViolation<WorkflowDef>> result = validator.validate(workflowDef);
- assertEquals(1, result.size());
-
- List<String> validationErrors = new ArrayList<>();
- result.forEach(e -> validationErrors.add(e.getMessage()));
-
- assertTrue(validationErrors.contains("ownerEmail should be valid email address"));
+ assertEquals(0, result.size());
}
@Test
diff --git a/core/src/main/java/com/netflix/conductor/core/config/ConductorProperties.java b/core/src/main/java/com/netflix/conductor/core/config/ConductorProperties.java
index 339f82db0..a83242e84 100644
--- a/core/src/main/java/com/netflix/conductor/core/config/ConductorProperties.java
+++ b/core/src/main/java/com/netflix/conductor/core/config/ConductorProperties.java
@@ -81,6 +81,9 @@ public class ConductorProperties {
@DurationUnit(ChronoUnit.SECONDS)
private Duration taskExecutionPostponeDuration = Duration.ofSeconds(60);
+ /** Used to enable/disable the indexing of tasks. */
+ private boolean taskIndexingEnabled = true;
+
/** Used to enable/disable the indexing of task execution logs. */
private boolean taskExecLogIndexingEnabled = true;
@@ -345,6 +348,14 @@ public void setTaskExecLogIndexingEnabled(boolean taskExecLogIndexingEnabled) {
this.taskExecLogIndexingEnabled = taskExecLogIndexingEnabled;
}
+ public boolean isTaskIndexingEnabled() {
+ return taskIndexingEnabled;
+ }
+
+ public void setTaskIndexingEnabled(boolean taskIndexingEnabled) {
+ this.taskIndexingEnabled = taskIndexingEnabled;
+ }
+
public boolean isAsyncIndexingEnabled() {
return asyncIndexingEnabled;
}
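Assuming `ConductorProperties` keeps its usual `conductor.app` prefix and Spring's relaxed binding, the new flag can be switched off like any other application property. The exact property path below is an inference from the field name, not taken from documentation:

```properties
# Assumed property path (Spring relaxed binding on the taskIndexingEnabled field).
# Stops per-update indexing of task summaries; task execution logs are governed
# separately by the existing taskExecLogIndexingEnabled flag.
conductor.app.taskIndexingEnabled=false
```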
diff --git a/core/src/main/java/com/netflix/conductor/core/dal/ExecutionDAOFacade.java b/core/src/main/java/com/netflix/conductor/core/dal/ExecutionDAOFacade.java
index 06251f9f3..3b428cf3f 100644
--- a/core/src/main/java/com/netflix/conductor/core/dal/ExecutionDAOFacade.java
+++ b/core/src/main/java/com/netflix/conductor/core/dal/ExecutionDAOFacade.java
@@ -530,7 +530,7 @@ public void updateTask(TaskModel taskModel) {
* of tasks on a system failure. So only index for each update if async indexing is not enabled.
* If it *is* enabled, tasks will be indexed only when a workflow is in terminal state.
*/
- if (!properties.isAsyncIndexingEnabled()) {
+ if (!properties.isAsyncIndexingEnabled() && properties.isTaskIndexingEnabled()) {
indexDAO.indexTask(new TaskSummary(taskModel.toTask()));
}
} catch (TerminateWorkflowException e) {
diff --git a/core/src/main/java/com/netflix/conductor/core/events/ScriptEvaluator.java b/core/src/main/java/com/netflix/conductor/core/events/ScriptEvaluator.java
index 1cd5e4a89..c77f3860d 100644
--- a/core/src/main/java/com/netflix/conductor/core/events/ScriptEvaluator.java
+++ b/core/src/main/java/com/netflix/conductor/core/events/ScriptEvaluator.java
@@ -17,6 +17,8 @@
import javax.script.ScriptEngineManager;
import javax.script.ScriptException;
+import org.openjdk.nashorn.api.scripting.NashornScriptEngineFactory;
+
public class ScriptEvaluator {
private static ScriptEngine engine;
@@ -25,7 +27,7 @@ private ScriptEvaluator() {}
/**
* Evaluates the script with the help of input provided but converts the result to a boolean
- * value.
+ * value. Set environment variable CONDUCTOR_NASHORN_ES6_ENABLED=true for Nashorn ES6 support.
*
* @param script Script to be evaluated.
* @param input Input parameters.
@@ -37,7 +39,8 @@ public static Boolean evalBool(String script, Object input) throws ScriptException {
}
/**
- * Evaluates the script with the help of input provided.
+ * Evaluates the script with the help of input provided. Set environment variable
+ * CONDUCTOR_NASHORN_ES6_ENABLED=true for Nashorn ES6 support.
*
* @param script Script to be evaluated.
* @param input Input parameters.
@@ -45,16 +48,30 @@ public static Boolean evalBool(String script, Object input) throws ScriptException {
* @return Generic object, the result of the evaluated expression.
*/
public static Object eval(String script, Object input) throws ScriptException {
- if (engine == null) {
- engine = new ScriptEngineManager().getEngineByName("Nashorn");
+ initEngine(false);
+ Bindings bindings = engine.createBindings();
+ bindings.put("$", input);
+ return engine.eval(script, bindings);
+ }
+
+ // to mock in a test
+ public static String getEnv(String name) {
+ return System.getenv(name);
+ }
+
+ public static void initEngine(boolean reInit) {
+ if (engine == null || reInit) {
+ if ("true".equalsIgnoreCase(getEnv("CONDUCTOR_NASHORN_ES6_ENABLED"))) {
+ NashornScriptEngineFactory factory = new NashornScriptEngineFactory();
+ engine = factory.getScriptEngine("--language=es6");
+ } else {
+ engine = new ScriptEngineManager().getEngineByName("Nashorn");
+ }
}
if (engine == null) {
throw new RuntimeException(
"missing nashorn engine. Ensure you are running supported JVM");
}
- Bindings bindings = engine.createBindings();
- bindings.put("$", input);
- return engine.eval(script, bindings);
}
/**
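With `CONDUCTOR_NASHORN_ES6_ENABLED=true` exported before startup, the standalone Nashorn engine is created with `--language=es6`, so ES6 constructs such as `const` become legal in inline scripts. A minimal sketch of evaluating such a script:

```java
import java.util.Map;

import com.netflix.conductor.core.events.ScriptEvaluator;

public class Es6EvalExample {
    public static void main(String[] args) throws Exception {
        // Requires CONDUCTOR_NASHORN_ES6_ENABLED=true in the process environment;
        // `const` would be a syntax error on the default (pre-ES6) engine.
        Map<String, Object> input = Map.of("status", "COMPLETED");
        Boolean done = ScriptEvaluator.evalBool(
                "(function(){ const s = $.status; return s == 'COMPLETED'; })();", input);
        System.out.println(done); // true
    }
}
```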
diff --git a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java
index c8520133c..a2561978f 100644
--- a/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java
+++ b/core/src/main/java/com/netflix/conductor/core/execution/WorkflowExecutor.java
@@ -1203,6 +1203,15 @@ List<String> cancelNonTerminalTasks(WorkflowModel workflow) {
if (!task.getStatus().isTerminal()) {
// Cancel the ones which are not completed yet....
task.setStatus(CANCELED);
+ try {
+ notifyTaskStatusListener(task);
+ } catch (Exception e) {
+ String errorMsg =
+ String.format(
+ "Error while notifying TaskStatusListener: %s for workflow: %s",
+ task.getTaskId(), task.getWorkflowInstanceId());
+ LOGGER.error(errorMsg, e);
+ }
if (systemTaskRegistry.isSystemTask(task.getTaskType())) {
WorkflowSystemTask workflowSystemTask =
systemTaskRegistry.get(task.getTaskType());
diff --git a/core/src/main/java/com/netflix/conductor/core/execution/tasks/DoWhile.java b/core/src/main/java/com/netflix/conductor/core/execution/tasks/DoWhile.java
index f273a8512..2e61b4fe7 100644
--- a/core/src/main/java/com/netflix/conductor/core/execution/tasks/DoWhile.java
+++ b/core/src/main/java/com/netflix/conductor/core/execution/tasks/DoWhile.java
@@ -14,6 +14,7 @@
import java.util.*;
import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import javax.script.ScriptException;
@@ -111,6 +112,17 @@ public boolean execute(
}
doWhileTaskModel.addOutput(String.valueOf(doWhileTaskModel.getIteration()), output);
+ Optional<Integer> keepLastN =
+ Optional.ofNullable(doWhileTaskModel.getWorkflowTask().getInputParameters())
+ .map(parameters -> parameters.get("keepLastN"))
+ .map(value -> (Integer) value);
+ if (keepLastN.isPresent() && doWhileTaskModel.getIteration() > keepLastN.get()) {
+ Integer iteration = doWhileTaskModel.getIteration();
+ IntStream.range(0, iteration - keepLastN.get() - 1)
+ .mapToObj(Integer::toString)
+ .forEach(doWhileTaskModel::removeOutput);
+ }
+
if (hasFailures) {
LOGGER.debug(
"Task {} failed in {} iteration",
diff --git a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java
index 4114e39ab..5b0db258b 100644
--- a/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java
+++ b/core/src/main/java/com/netflix/conductor/core/execution/tasks/Join.java
@@ -13,6 +13,7 @@
package com.netflix.conductor.core.execution.tasks;
import java.util.List;
+import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
@@ -36,9 +37,6 @@ public Join() {
@SuppressWarnings("unchecked")
public boolean execute(
WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
-
- boolean allDone = true;
- boolean hasFailures = false;
StringBuilder failureReason = new StringBuilder();
StringBuilder optionalTaskFailures = new StringBuilder();
List<String> joinOn = (List<String>) task.getInputData().get("joinOn");
@@ -47,41 +45,47 @@ public boolean execute(
joinOn =
joinOn.stream()
.map(name -> TaskUtils.appendIteration(name, task.getIteration()))
- .collect(Collectors.toList());
+ .toList();
}
+
+ boolean allTasksTerminal =
+ joinOn.stream()
+ .map(workflow::getTaskByRefName)
+ .allMatch(t -> t != null && t.getStatus().isTerminal());
+
for (String joinOnRef : joinOn) {
TaskModel forkedTask = workflow.getTaskByRefName(joinOnRef);
if (forkedTask == null) {
- // Task is not even scheduled yet
- allDone = false;
- break;
+ // Continue checking other tasks if a referenced task is not yet scheduled
+ continue;
}
+
TaskModel.Status taskStatus = forkedTask.getStatus();
- hasFailures =
+
+ // Only add to task output if it's not empty
+ if (!forkedTask.getOutputData().isEmpty()) {
+ task.addOutput(joinOnRef, forkedTask.getOutputData());
+ }
+
+ // Determine if the join task fails immediately due to a non-optional, non-permissive
+ // task failure,
+ // or waits for all tasks to be terminal if the failed task is permissive.
+ var isJoinFailure =
!taskStatus.isSuccessful()
&& !forkedTask.getWorkflowTask().isOptional()
- && (!forkedTask.getWorkflowTask().isPermissive()
- || joinOn.stream()
- .map(workflow::getTaskByRefName)
- .allMatch(t -> t.getStatus().isTerminal()));
- if (hasFailures) {
+ && (!forkedTask.getWorkflowTask().isPermissive() || allTasksTerminal);
+ if (isJoinFailure) {
final String failureReasons =
joinOn.stream()
.map(workflow::getTaskByRefName)
+ .filter(Objects::nonNull)
.filter(t -> !t.getStatus().isSuccessful())
.map(TaskModel::getReasonForIncompletion)
.collect(Collectors.joining(" "));
failureReason.append(failureReasons);
- }
- // Only add to task output if it's not empty
- if (!forkedTask.getOutputData().isEmpty()) {
- task.addOutput(joinOnRef, forkedTask.getOutputData());
- }
- if (!taskStatus.isTerminal()) {
- allDone = false;
- }
- if (hasFailures) {
- break;
+ task.setReasonForIncompletion(failureReason.toString());
+ task.setStatus(TaskModel.Status.FAILED);
+ return true;
}
// check for optional task failures
@@ -95,11 +99,10 @@ public boolean execute(
.append(" ");
}
}
- if (allDone || hasFailures || optionalTaskFailures.length() > 0) {
- if (hasFailures) {
- task.setReasonForIncompletion(failureReason.toString());
- task.setStatus(TaskModel.Status.FAILED);
- } else if (optionalTaskFailures.length() > 0) {
+
+ // Finalize the join task's status based on the outcomes of all referenced tasks.
+ if (allTasksTerminal) {
+ if (!optionalTaskFailures.isEmpty()) {
task.setStatus(TaskModel.Status.COMPLETED_WITH_ERRORS);
optionalTaskFailures.append("completed with errors");
task.setReasonForIncompletion(optionalTaskFailures.toString());
@@ -108,6 +111,8 @@ public boolean execute(
}
return true;
}
+
+ // Task execution not complete, waiting on more tasks to reach terminal state.
return false;
}
diff --git a/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java b/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java
index 6e29dd64d..3c9dcb769 100644
--- a/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java
+++ b/core/src/main/java/com/netflix/conductor/core/execution/tasks/SubWorkflow.java
@@ -147,11 +147,6 @@ public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor work
workflowExecutor.terminateWorkflow(subWorkflow, reason, null);
}
- @Override
- public boolean isAsync() {
- return true;
- }
-
/**
* Keep Subworkflow task asyncComplete. The Subworkflow task will be executed once
* asynchronously to move to IN_PROGRESS state, and will move to termination by Subworkflow's
diff --git a/core/src/main/java/com/netflix/conductor/model/TaskModel.java b/core/src/main/java/com/netflix/conductor/model/TaskModel.java
index 8dd6207a6..122c31b5b 100644
--- a/core/src/main/java/com/netflix/conductor/model/TaskModel.java
+++ b/core/src/main/java/com/netflix/conductor/model/TaskModel.java
@@ -874,6 +874,10 @@ public void addOutput(String key, Object value) {
this.outputData.put(key, value);
}
+ public void removeOutput(String key) {
+ this.outputData.remove(key);
+ }
+
public void addOutput(Map outputData) {
if (outputData != null) {
this.outputData.putAll(outputData);
diff --git a/core/src/main/java/com/netflix/conductor/service/ExecutionService.java b/core/src/main/java/com/netflix/conductor/service/ExecutionService.java
index 33360c625..2c985dec0 100644
--- a/core/src/main/java/com/netflix/conductor/service/ExecutionService.java
+++ b/core/src/main/java/com/netflix/conductor/service/ExecutionService.java
@@ -35,6 +35,7 @@
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
+import com.netflix.conductor.core.listener.TaskStatusListener;
import com.netflix.conductor.core.utils.QueueUtils;
import com.netflix.conductor.core.utils.Utils;
import com.netflix.conductor.dao.QueueDAO;
@@ -52,6 +53,7 @@ public class ExecutionService {
private final QueueDAO queueDAO;
private final ExternalPayloadStorage externalPayloadStorage;
private final SystemTaskRegistry systemTaskRegistry;
+ private final TaskStatusListener taskStatusListener;
private final long queueTaskMessagePostponeSecs;
@@ -65,7 +67,8 @@ public ExecutionService(
QueueDAO queueDAO,
ConductorProperties properties,
ExternalPayloadStorage externalPayloadStorage,
- SystemTaskRegistry systemTaskRegistry) {
+ SystemTaskRegistry systemTaskRegistry,
+ TaskStatusListener taskStatusListener) {
this.workflowExecutor = workflowExecutor;
this.executionDAOFacade = executionDAOFacade;
this.queueDAO = queueDAO;
@@ -74,6 +77,7 @@ public ExecutionService(
this.queueTaskMessagePostponeSecs =
properties.getTaskExecutionPostponeDuration().getSeconds();
this.systemTaskRegistry = systemTaskRegistry;
+ this.taskStatusListener = taskStatusListener;
}
public Task poll(String taskType, String workerId) {
@@ -181,6 +185,11 @@ public List<Task> poll(
queueDAO.postpone(queueName, taskId, 0, queueTaskMessagePostponeSecs);
}
}
+ taskIds.stream()
+ .map(executionDAOFacade::getTaskModel)
+ .filter(Objects::nonNull)
+ .filter(task -> TaskModel.Status.IN_PROGRESS.equals(task.getStatus()))
+ .forEach(taskStatusListener::onTaskInProgress);
executionDAOFacade.updateTaskLastPoll(taskType, domain, workerId);
Monitors.recordTaskPoll(queueName);
tasks.forEach(this::ackTaskReceived);
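The batch poll path now looks up each handed-out task and fires `TaskStatusListener#onTaskInProgress` for those that moved to IN_PROGRESS. A sketch of a listener reacting to that callback; only `onTaskInProgress` is grounded in the diff above, and the interface's other methods are assumed to have default no-op implementations (otherwise they would need to be overridden as well):

```java
import com.netflix.conductor.core.listener.TaskStatusListener;
import com.netflix.conductor.model.TaskModel;

// Hypothetical listener implementation for illustration only.
public class LoggingTaskStatusListener implements TaskStatusListener {
    @Override
    public void onTaskInProgress(TaskModel task) {
        System.out.printf("task %s (%s) is now IN_PROGRESS%n",
                task.getTaskId(), task.getTaskDefName());
    }
}
```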
diff --git a/core/src/main/java/com/netflix/conductor/service/WorkflowBulkService.java b/core/src/main/java/com/netflix/conductor/service/WorkflowBulkService.java
index de6bd4292..ca240b1a9 100644
--- a/core/src/main/java/com/netflix/conductor/service/WorkflowBulkService.java
+++ b/core/src/main/java/com/netflix/conductor/service/WorkflowBulkService.java
@@ -67,4 +67,23 @@ BulkResponse terminate(
"Cannot process more than {max} workflows. Please use multiple requests.")
List<String> workflowIds,
String reason);
+
+ BulkResponse deleteWorkflow(
+ @NotEmpty(message = "WorkflowIds list cannot be null.")
+ @Size(
+ max = MAX_REQUEST_ITEMS,
+ message =
+ "Cannot process more than {max} workflows. Please use multiple requests.")
+ List<String> workflowIds,
+ boolean archiveWorkflow);
+
+ BulkResponse terminateRemove(
+ @NotEmpty(message = "WorkflowIds list cannot be null.")
+ @Size(
+ max = MAX_REQUEST_ITEMS,
+ message =
+ "Cannot process more than {max} workflows. Please use multiple requests.")
+ List<String> workflowIds,
+ String reason,
+ boolean archiveWorkflow);
}
diff --git a/core/src/main/java/com/netflix/conductor/service/WorkflowBulkServiceImpl.java b/core/src/main/java/com/netflix/conductor/service/WorkflowBulkServiceImpl.java
index 5e05ae453..fcbdbe3bc 100644
--- a/core/src/main/java/com/netflix/conductor/service/WorkflowBulkServiceImpl.java
+++ b/core/src/main/java/com/netflix/conductor/service/WorkflowBulkServiceImpl.java
@@ -30,9 +30,12 @@ public class WorkflowBulkServiceImpl implements WorkflowBulkService {
private static final Logger LOGGER = LoggerFactory.getLogger(WorkflowBulkService.class);
private final WorkflowExecutor workflowExecutor;
+ private final WorkflowService workflowService;
- public WorkflowBulkServiceImpl(WorkflowExecutor workflowExecutor) {
+ public WorkflowBulkServiceImpl(
+ WorkflowExecutor workflowExecutor, WorkflowService workflowService) {
this.workflowExecutor = workflowExecutor;
+ this.workflowService = workflowService;
}
/**
@@ -164,4 +167,70 @@ public BulkResponse terminate(List<String> workflowIds, String reason) {
}
return bulkResponse;
}
+
+ /**
+ * Removes a list of workflows from the system.
+ *
+ * @param workflowIds List of workflow IDs of the workflows to remove from the system.
+ * @param archiveWorkflow Archives the workflow and associated tasks instead of removing them.
+ */
+ public BulkResponse deleteWorkflow(List<String> workflowIds, boolean archiveWorkflow) {
+ BulkResponse bulkResponse = new BulkResponse();
+ for (String workflowId : workflowIds) {
+ try {
+ workflowService.deleteWorkflow(
+ workflowId,
+ archiveWorkflow); // TODO: change this to method that cancels then deletes
+ bulkResponse.appendSuccessResponse(workflowId);
+ } catch (Exception e) {
+ LOGGER.error(
+ "bulk delete exception, workflowId {}, message: {} ",
+ workflowId,
+ e.getMessage(),
+ e);
+ bulkResponse.appendFailedResponse(workflowId, e.getMessage());
+ }
+ }
+ return bulkResponse;
+ }
+
+ /**
+ * Terminates execution for workflows in a list, then removes each workflow.
+ *
+ * @param workflowIds List of workflow IDs to terminate and delete.
+ * @param reason Reason for terminating the workflow.
+ * @param archiveWorkflow Archives the workflow and associated tasks instead of removing them.
+ * @return bulk response object containing a list of succeeded workflows and a list of failed
+ * ones with errors
+ */
+ public BulkResponse terminateRemove(
+ List<String> workflowIds, String reason, boolean archiveWorkflow) {
+ BulkResponse bulkResponse = new BulkResponse();
+ for (String workflowId : workflowIds) {
+ try {
+ workflowExecutor.terminateWorkflow(workflowId, reason);
+ bulkResponse.appendSuccessResponse(workflowId);
+ } catch (Exception e) {
+ LOGGER.error(
+ "bulk terminate exception, workflowId {}, message: {} ",
+ workflowId,
+ e.getMessage(),
+ e);
+ bulkResponse.appendFailedResponse(workflowId, e.getMessage());
+ }
+
+ try {
+ workflowService.deleteWorkflow(workflowId, archiveWorkflow);
+ bulkResponse.appendSuccessResponse(workflowId);
+ } catch (Exception e) {
+ LOGGER.error(
+ "bulk delete exception, workflowId {}, message: {} ",
+ workflowId,
+ e.getMessage(),
+ e);
+ bulkResponse.appendFailedResponse(workflowId, e.getMessage());
+ }
+ }
+ return bulkResponse;
+ }
}
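From the caller's side, each workflow id is terminated and then deleted independently, and because `terminateRemove` appends to the response after each step, a single id can appear twice on success, or once per list if only one step fails. A usage sketch (the service reference is assumed to be an injected bean):

```java
import java.util.List;

import com.netflix.conductor.common.model.BulkResponse;
import com.netflix.conductor.service.WorkflowBulkService;

public class BulkCleanupExample {
    void cleanup(WorkflowBulkService workflowBulkService) {
        BulkResponse response = workflowBulkService.terminateRemove(
                List.of("wf-1", "wf-2"), "cleanup before redeploy", true);

        // Per-step reporting: an id can be recorded once for the terminate
        // step and again for the delete step.
        System.out.println("succeeded: " + response.getBulkSuccessfulResults());
        System.out.println("failed: " + response.getBulkErrorResults());
    }
}
```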
diff --git a/core/src/main/java/com/netflix/conductor/service/WorkflowService.java b/core/src/main/java/com/netflix/conductor/service/WorkflowService.java
index 2253cad54..07fff88e0 100644
--- a/core/src/main/java/com/netflix/conductor/service/WorkflowService.java
+++ b/core/src/main/java/com/netflix/conductor/service/WorkflowService.java
@@ -242,6 +242,19 @@ void terminateWorkflow(
@NotEmpty(message = "WorkflowId cannot be null or empty.") String workflowId,
String reason);
+ /**
+ * Terminate workflow execution, and then remove it from the system. Acts as terminate and
+ * remove combined.
+ *
+ * @param workflowId WorkflowId of the workflow
+ * @param reason Reason for terminating the workflow.
+ * @param archiveWorkflow Archives the workflow and associated tasks instead of removing them.
+ */
+ void terminateRemove(
+ @NotEmpty(message = "WorkflowId cannot be null or empty.") String workflowId,
+ String reason,
+ boolean archiveWorkflow);
+
/**
* Search for workflows based on payload and given parameters. Use sort options as sort ASC or
* DESC e.g. sort=name or sort=workflowId:DESC. If order is not specified, defaults to ASC.
diff --git a/core/src/main/java/com/netflix/conductor/service/WorkflowServiceImpl.java b/core/src/main/java/com/netflix/conductor/service/WorkflowServiceImpl.java
index 93777afd5..bc6a700ee 100644
--- a/core/src/main/java/com/netflix/conductor/service/WorkflowServiceImpl.java
+++ b/core/src/main/java/com/netflix/conductor/service/WorkflowServiceImpl.java
@@ -196,6 +196,19 @@ public void deleteWorkflow(String workflowId, boolean archiveWorkflow) {
executionService.removeWorkflow(workflowId, archiveWorkflow);
}
+ /**
+ * Terminate workflow execution, and then remove it from the system. Acts as terminate and
+ * remove combined.
+ *
+ * @param workflowId WorkflowId of the workflow
+ * @param reason Reason for terminating the workflow.
+ * @param archiveWorkflow Archives the workflow and associated tasks instead of removing them.
+ */
+ public void terminateRemove(String workflowId, String reason, boolean archiveWorkflow) {
+ workflowExecutor.terminateWorkflow(workflowId, reason);
+ executionService.removeWorkflow(workflowId, archiveWorkflow);
+ }
+
/**
* Retrieves all the running workflows.
*
diff --git a/core/src/test/java/com/netflix/conductor/core/events/TestScriptEval.java b/core/src/test/java/com/netflix/conductor/core/events/TestScriptEval.java
index 313c02bc3..3877a2093 100644
--- a/core/src/test/java/com/netflix/conductor/core/events/TestScriptEval.java
+++ b/core/src/test/java/com/netflix/conductor/core/events/TestScriptEval.java
@@ -16,6 +16,8 @@
import java.util.Map;
import org.junit.Test;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -44,4 +46,43 @@ public void testScript() throws Exception {
assertTrue(ScriptEvaluator.evalBool(script3, payload));
assertFalse(ScriptEvaluator.evalBool(script4, payload));
}
+
+ @Test
+ public void testES6Setting() throws Exception {
+ Map<String, Object> payload = new HashMap<>();
+ Map<String, Object> app = new HashMap<>();
+ app.put("name", "conductor");
+ app.put("version", 2.0);
+ app.put("license", "Apache 2.0");
+
+ payload.put("app", app);
+ payload.put("author", "Netflix");
+ payload.put("oss", true);
+
+ String script1 =
+ """
+ (function(){\s
+ const variable = 1; // const support => es6\s
+ return $.app.name == 'conductor';})();"""; // true
+
+ MockedStatic<ScriptEvaluator> evaluator = Mockito.mockStatic(ScriptEvaluator.class);
+ evaluator
+ .when(() -> ScriptEvaluator.getEnv("CONDUCTOR_NASHORN_ES6_ENABLED"))
+ .thenReturn("true");
+ evaluator
+ .when(() -> ScriptEvaluator.eval(Mockito.any(), Mockito.any()))
+ .thenCallRealMethod();
+ evaluator
+ .when(() -> ScriptEvaluator.evalBool(Mockito.any(), Mockito.any()))
+ .thenCallRealMethod();
+ evaluator.when(() -> ScriptEvaluator.initEngine(Mockito.anyBoolean())).thenCallRealMethod();
+ evaluator.when(() -> ScriptEvaluator.toBoolean(Mockito.any())).thenCallRealMethod();
+ ScriptEvaluator.initEngine(true);
+ assertTrue(ScriptEvaluator.evalBool(script1, payload));
+ evaluator
+ .when(() -> ScriptEvaluator.getEnv("CONDUCTOR_NASHORN_ES6_ENABLED"))
+ .thenReturn("false");
+ ScriptEvaluator.initEngine(true);
+ evaluator.close();
+ }
}
diff --git a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java
index 7e78aee4e..761f62957 100644
--- a/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java
+++ b/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java
@@ -1242,6 +1242,55 @@ public void testCheckForWorkflowCompletion() {
assertTrue(deciderService.checkForWorkflowCompletion(workflow));
}
+ @Test
+ public void testWorkflowCompleted_WhenAllOptionalTasksInTerminalState() {
+ var workflowDef = createOnlyOptionalTaskWorkflow();
+
+ var workflow = new WorkflowModel();
+ workflow.setWorkflowDefinition(workflowDef);
+ workflow.setStatus(WorkflowModel.Status.RUNNING);
+
+ // Workflow should be running
+ assertFalse(deciderService.checkForWorkflowCompletion(workflow));
+
+ var task1 = new TaskModel();
+ task1.setTaskType(SIMPLE.name());
+ task1.setReferenceTaskName("o1");
+ task1.setStatus(TaskModel.Status.FAILED_WITH_TERMINAL_ERROR);
+
+ assertFalse(deciderService.checkForWorkflowCompletion(workflow));
+
+ var task2 = new TaskModel();
+ task2.setTaskType(SIMPLE.name());
+ task2.setReferenceTaskName("o2");
+ task2.setStatus(TaskModel.Status.COMPLETED_WITH_ERRORS);
+
+ workflow.getTasks().addAll(List.of(task1, task2));
+
+ // Workflow should be COMPLETED. All optional tasks have reached a terminal state.
+ assertTrue(deciderService.checkForWorkflowCompletion(workflow));
+ }
+
+ private WorkflowDef createOnlyOptionalTaskWorkflow() {
+ var workflowTask1 = new WorkflowTask();
+ workflowTask1.setName("junit_task_1");
+ workflowTask1.setTaskReferenceName("o1");
+ workflowTask1.setTaskDefinition(new TaskDef("junit_task_1"));
+ workflowTask1.setOptional(true);
+
+ var workflowTask2 = new WorkflowTask();
+ workflowTask2.setName("junit_task_2");
+ workflowTask2.setTaskReferenceName("o2");
+ workflowTask2.setTaskDefinition(new TaskDef("junit_task_2"));
+ workflowTask2.setOptional(true);
+
+ var workflowDef = new WorkflowDef();
+ workflowDef.setSchemaVersion(2);
+ workflowDef.setName("only_optional_tasks_workflow");
+ workflowDef.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2));
+ return workflowDef;
+ }
+
private WorkflowDef createConditionalWF() {
WorkflowTask workflowTask1 = new WorkflowTask();
diff --git a/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestJoin.java b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestJoin.java
new file mode 100644
index 000000000..66082edd0
--- /dev/null
+++ b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestJoin.java
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2024 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.core.execution.tasks;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.junit.Test;
+
+import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
+import com.netflix.conductor.core.execution.WorkflowExecutor;
+import com.netflix.conductor.model.TaskModel;
+import com.netflix.conductor.model.WorkflowModel;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+
+public class TestJoin {
+ private final WorkflowExecutor executor = mock(WorkflowExecutor.class);
+
+ private TaskModel createTask(
+ String referenceName,
+ TaskModel.Status status,
+ boolean isOptional,
+ boolean isPermissive) {
+ TaskModel task = new TaskModel();
+ task.setStatus(status);
+ task.setReferenceTaskName(referenceName);
+ WorkflowTask workflowTask = new WorkflowTask();
+ workflowTask.setOptional(isOptional);
+ workflowTask.setPermissive(isPermissive);
+ task.setWorkflowTask(workflowTask);
+ return task;
+ }
+
+ private Pair<WorkflowModel, TaskModel> createJoinWorkflow(
+ List<TaskModel> tasks, String... extraTaskRefNames) {
+ WorkflowModel workflow = new WorkflowModel();
+ var join = new TaskModel();
+ join.setReferenceTaskName("join");
+ var taskRefNames =
+ tasks.stream().map(TaskModel::getReferenceTaskName).collect(Collectors.toList());
+ taskRefNames.addAll(List.of(extraTaskRefNames));
+ join.getInputData().put("joinOn", taskRefNames);
+ workflow.getTasks().addAll(tasks);
+ workflow.getTasks().add(join);
+ return Pair.of(workflow, join);
+ }
+
+ @Test
+ public void testShouldNotMarkJoinAsCompletedWithErrorsWhenNotDone() {
+ var task1 = createTask("task1", TaskModel.Status.COMPLETED_WITH_ERRORS, true, false);
+
+ // task2 is not scheduled yet, so the join is not completed
+ var wfJoinPair = createJoinWorkflow(List.of(task1), "task2");
+
+ var join = new Join();
+ var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertFalse(result);
+ }
+
+ @Test
+ public void testJoinCompletesSuccessfullyWhenAllTasksSucceed() {
+ var task1 = createTask("task1", TaskModel.Status.COMPLETED, false, false);
+ var task2 = createTask("task2", TaskModel.Status.COMPLETED, false, false);
+
+ var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
+
+ var join = new Join();
+ var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertTrue("Join task should execute successfully when all tasks succeed", result);
+ assertEquals(
+ "Join task status should be COMPLETED when all tasks succeed",
+ TaskModel.Status.COMPLETED,
+ wfJoinPair.getRight().getStatus());
+ }
+
+ @Test
+ public void testJoinWaitsWhenAnyTaskIsNotTerminal() {
+ var task1 = createTask("task1", TaskModel.Status.IN_PROGRESS, false, false);
+ var task2 = createTask("task2", TaskModel.Status.COMPLETED, false, false);
+
+ var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
+
+ var join = new Join();
+ var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertFalse("Join task should wait when any task is not in terminal state", result);
+ }
+
+ @Test
+ public void testJoinFailsWhenMandatoryTaskFails() {
+ // Mandatory task fails
+ var task1 = createTask("task1", TaskModel.Status.FAILED, false, false);
+ // Optional task completes with errors
+ var task2 = createTask("task2", TaskModel.Status.COMPLETED_WITH_ERRORS, true, false);
+
+ var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
+
+ var join = new Join();
+ var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertTrue("Join task should be executed when a mandatory task fails", result);
+ assertEquals(
+ "Join task status should be FAILED when a mandatory task fails",
+ TaskModel.Status.FAILED,
+ wfJoinPair.getRight().getStatus());
+ }
+
+ @Test
+ public void testJoinCompletesWithErrorsWhenOnlyOptionalTasksFail() {
+ // Mandatory task succeeds
+ var task1 = createTask("task1", TaskModel.Status.COMPLETED, false, false);
+ // Optional task completes with errors
+ var task2 = createTask("task2", TaskModel.Status.COMPLETED_WITH_ERRORS, true, false);
+
+ var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
+
+ var join = new Join();
+ var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertTrue("Join task should be executed when only optional tasks fail", result);
+ assertEquals(
+ "Join task status should be COMPLETED_WITH_ERRORS when only optional tasks fail",
+ TaskModel.Status.COMPLETED_WITH_ERRORS,
+ wfJoinPair.getRight().getStatus());
+ }
+
+ @Test
+ public void testJoinAggregatesFailureReasonsCorrectly() {
+ var task1 = createTask("task1", TaskModel.Status.FAILED, false, false);
+ task1.setReasonForIncompletion("Task1 failed");
+ var task2 = createTask("task2", TaskModel.Status.FAILED, false, false);
+ task2.setReasonForIncompletion("Task2 failed");
+
+ var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
+
+ var join = new Join();
+ var result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertTrue("Join task should be executed when tasks fail", result);
+ assertEquals(
+ "Join task status should be FAILED when tasks fail",
+ TaskModel.Status.FAILED,
+ wfJoinPair.getRight().getStatus());
+ assertTrue(
+ "Join task reason for incompletion should aggregate failure reasons",
+ wfJoinPair.getRight().getReasonForIncompletion().contains("Task1 failed")
+ && wfJoinPair
+ .getRight()
+ .getReasonForIncompletion()
+ .contains("Task2 failed"));
+ }
+
+ @Test
+ public void testJoinWaitsForAllTasksBeforeFailingDueToPermissiveTaskFailure() {
+ // Task 1 is a permissive task that fails.
+ var task1 = createTask("task1", TaskModel.Status.FAILED, false, true);
+ // Task 2 is a non-permissive task that eventually succeeds.
+ var task2 =
+ createTask(
+ "task2",
+ TaskModel.Status.IN_PROGRESS,
+ false,
+ false); // Initially not in a terminal state.
+
+ var wfJoinPair = createJoinWorkflow(List.of(task1, task2));
+
+ // First execution: Task 2 is not yet terminal.
+ var join = new Join();
+ boolean result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertFalse("Join task should wait as not all tasks are terminal", result);
+
+ // Simulate Task 2 reaching a terminal state.
+ task2.setStatus(TaskModel.Status.COMPLETED);
+
+ // Second execution: Now all tasks are terminal.
+ result = join.execute(wfJoinPair.getLeft(), wfJoinPair.getRight(), executor);
+ assertTrue("Join task should proceed as now all tasks are terminal", result);
+ assertEquals(
+ "Join task should be marked as FAILED due to permissive task failure",
+ TaskModel.Status.FAILED,
+ wfJoinPair.getRight().getStatus());
+ }
+}
diff --git a/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSubWorkflow.java b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSubWorkflow.java
index 2d5fcccd8..15a975f41 100644
--- a/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSubWorkflow.java
+++ b/core/src/test/java/com/netflix/conductor/core/execution/tasks/TestSubWorkflow.java
@@ -415,7 +415,7 @@ public void testCancelWithoutWorkflowId() {
@Test
public void testIsAsync() {
- assertTrue(subWorkflow.isAsync());
+ assertFalse(subWorkflow.isAsync());
}
@Test
diff --git a/core/src/test/java/com/netflix/conductor/service/ExecutionServiceTest.java b/core/src/test/java/com/netflix/conductor/service/ExecutionServiceTest.java
index 9c7a631e3..a1974c713 100644
--- a/core/src/test/java/com/netflix/conductor/service/ExecutionServiceTest.java
+++ b/core/src/test/java/com/netflix/conductor/service/ExecutionServiceTest.java
@@ -34,6 +34,7 @@
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
+import com.netflix.conductor.core.listener.TaskStatusListener;
import com.netflix.conductor.dao.QueueDAO;
import static junit.framework.TestCase.assertEquals;
@@ -48,6 +49,7 @@ public class ExecutionServiceTest {
@Mock private ConductorProperties conductorProperties;
@Mock private ExternalPayloadStorage externalPayloadStorage;
@Mock private SystemTaskRegistry systemTaskRegistry;
+ @Mock private TaskStatusListener taskStatusListener;
private ExecutionService executionService;
@@ -68,7 +70,8 @@ public void setup() {
queueDAO,
conductorProperties,
externalPayloadStorage,
- systemTaskRegistry);
+ systemTaskRegistry,
+ taskStatusListener);
WorkflowDef workflowDef = new WorkflowDef();
workflow1 = new Workflow();
workflow1.setWorkflowId("wf1");
diff --git a/core/src/test/java/com/netflix/conductor/service/MetadataServiceTest.java b/core/src/test/java/com/netflix/conductor/service/MetadataServiceTest.java
index 1fa3f1990..e4f827340 100644
--- a/core/src/test/java/com/netflix/conductor/service/MetadataServiceTest.java
+++ b/core/src/test/java/com/netflix/conductor/service/MetadataServiceTest.java
@@ -377,13 +377,12 @@ public void testRegisterWorkflowDefInvalidName() {
workflowDef.setOwnerEmail("inavlid-email");
metadataService.registerWorkflowDef(workflowDef);
} catch (ConstraintViolationException ex) {
- assertEquals(3, ex.getConstraintViolations().size());
+ assertEquals(2, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowTask list cannot be empty"));
assertTrue(
messages.contains(
"Workflow name cannot contain the following set of characters: ':'"));
- assertTrue(messages.contains("ownerEmail should be valid email address"));
throw ex;
}
fail("metadataService.registerWorkflowDef did not throw ConstraintViolationException !");
@@ -397,13 +396,12 @@ public void testValidateWorkflowDefInvalidName() {
workflowDef.setOwnerEmail("inavlid-email");
metadataService.validateWorkflowDef(workflowDef);
} catch (ConstraintViolationException ex) {
- assertEquals(3, ex.getConstraintViolations().size());
+ assertEquals(2, ex.getConstraintViolations().size());
Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
assertTrue(messages.contains("WorkflowTask list cannot be empty"));
assertTrue(
messages.contains(
"Workflow name cannot contain the following set of characters: ':'"));
- assertTrue(messages.contains("ownerEmail should be valid email address"));
throw ex;
}
fail("metadataService.validateWorkflowDef did not throw ConstraintViolationException !");
diff --git a/core/src/test/java/com/netflix/conductor/service/WorkflowBulkServiceTest.java b/core/src/test/java/com/netflix/conductor/service/WorkflowBulkServiceTest.java
index 27c23b992..25f70fd35 100644
--- a/core/src/test/java/com/netflix/conductor/service/WorkflowBulkServiceTest.java
+++ b/core/src/test/java/com/netflix/conductor/service/WorkflowBulkServiceTest.java
@@ -50,8 +50,14 @@ WorkflowExecutor workflowExecutor() {
}
@Bean
- public WorkflowBulkService workflowBulkService(WorkflowExecutor workflowExecutor) {
- return new WorkflowBulkServiceImpl(workflowExecutor);
+ WorkflowService workflowService() {
+ return mock(WorkflowService.class);
+ }
+
+ @Bean
+ public WorkflowBulkService workflowBulkService(
+ WorkflowExecutor workflowExecutor, WorkflowService workflowService) {
+ return new WorkflowBulkServiceImpl(workflowExecutor, workflowService);
}
}
@@ -144,4 +150,28 @@ public void testTerminateNull() {
throw ex;
}
}
+
+ @Test(expected = ConstraintViolationException.class)
+ public void testDeleteWorkflowNull() {
+ try {
+ workflowBulkService.deleteWorkflow(null, false);
+ } catch (ConstraintViolationException ex) {
+ assertEquals(1, ex.getConstraintViolations().size());
+ Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
+ assertTrue(messages.contains("WorkflowIds list cannot be null."));
+ throw ex;
+ }
+ }
+
+ @Test(expected = ConstraintViolationException.class)
+ public void testTerminateRemoveNull() {
+ try {
+ workflowBulkService.terminateRemove(null, null, false);
+ } catch (ConstraintViolationException ex) {
+ assertEquals(1, ex.getConstraintViolations().size());
+ Set<String> messages = getConstraintViolationMessages(ex.getConstraintViolations());
+ assertTrue(messages.contains("WorkflowIds list cannot be null."));
+ throw ex;
+ }
+ }
}
diff --git a/core/src/test/java/com/netflix/conductor/validations/WorkflowTaskTypeConstraintTest.java b/core/src/test/java/com/netflix/conductor/validations/WorkflowTaskTypeConstraintTest.java
index eb1d88dd3..e58ed97e0 100644
--- a/core/src/test/java/com/netflix/conductor/validations/WorkflowTaskTypeConstraintTest.java
+++ b/core/src/test/java/com/netflix/conductor/validations/WorkflowTaskTypeConstraintTest.java
@@ -29,7 +29,6 @@
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
-import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.execution.tasks.Terminate;
import com.netflix.conductor.dao.MetadataDAO;
@@ -401,25 +400,6 @@ public void testWorkflowTaskTypeSubworkflowMissingSubworkflowParam() {
"subWorkflowParam field is required for taskType: SUB_WORKFLOW taskName: encode"));
}
- @Test
- public void testWorkflowTaskTypeSubworkflow() {
- WorkflowTask workflowTask = createSampleWorkflowTask();
- workflowTask.setType("SUB_WORKFLOW");
-
- SubWorkflowParams subWorkflowTask = new SubWorkflowParams();
- workflowTask.setSubWorkflowParam(subWorkflowTask);
-
- Set<ConstraintViolation<WorkflowTask>> result = validator.validate(workflowTask);
- assertEquals(2, result.size());
-
- List<String> validationErrors = new ArrayList<>();
-
- result.forEach(e -> validationErrors.add(e.getMessage()));
-
- assertTrue(validationErrors.contains("SubWorkflowParams name cannot be null"));
- assertTrue(validationErrors.contains("SubWorkflowParams name cannot be empty"));
- }
-
@Test
public void testWorkflowTaskTypeTerminateWithoutTerminationStatus() {
WorkflowTask workflowTask = createSampleWorkflowTask();
diff --git a/dependencies.gradle b/dependencies.gradle
index 1a7e8f962..a9797ed4e 100644
--- a/dependencies.gradle
+++ b/dependencies.gradle
@@ -38,6 +38,7 @@ ext {
revHamcrestAllMatchers = '1.8'
revHealth = '1.1.4'
revProtoBuf = '3.22.0'
+ revPostgres = '42.7.3'
revJakartaAnnotation = '2.1.1'
revJAXB = '4.0.1'
revJAXRS = '2.1.1'
@@ -70,9 +71,8 @@ ext {
revCodec = '1.15'
revAzureStorageBlobSdk = '12.25.3'
revNatsStreaming = '2.6.5'
- revNats = '2.15.6'
+ revNats = '2.16.14'
revStan = '2.2.3'
revJettison = '1.5.4'
- revPostgresql = '42.7.3'
}
diff --git a/docs/devguide/architecture/directed-acyclic-graph.md b/docs/devguide/architecture/directed-acyclic-graph.md
index f8d08c0b6..c741eca14 100644
--- a/docs/devguide/architecture/directed-acyclic-graph.md
+++ b/docs/devguide/architecture/directed-acyclic-graph.md
@@ -34,13 +34,13 @@ So a Directed Acyclic Graph is a set of vertices where the connections are direc
Since a Conductor workflow is a series of vertices that can connect in only a specific direction and cannot loop, a Conductor workflow is thus a directed acyclic graph:
-![Conductor Dag](dag_workflow.png)
+![Conductor Dag](dag_workflow2.png)
### Can a workflow have loops and still be a DAG?
Yes. For example, Conductor workflows have Do-While loops:
-![Conductor Dag](dag_workflow2.png)
+![Conductor Dag](dag_workflow.png)
This is still a DAG, because the loop is just shorthand for running the tasks inside the loop over and over again. For example, if the 2nd loop in the above image is run 3 times, the workflow path will be:
diff --git a/docs/documentation/advanced/postgresql.md b/docs/documentation/advanced/postgresql.md
new file mode 100644
index 000000000..3a1d2ff3b
--- /dev/null
+++ b/docs/documentation/advanced/postgresql.md
@@ -0,0 +1,93 @@
+# PostgreSQL
+
+By default, Conductor runs with an in-memory Redis mock. However, you
+can run Conductor against PostgreSQL, which provides workflow management, queues, indexing, and locking.
+A number of configuration options let you choose how much of this PostgreSQL functionality to use.
+Running on PostgreSQL has the benefit of requiring fewer moving parts in your infrastructure, but it does not scale as well to high volumes of workflows.
+You should benchmark Conductor with Postgres against your specific workload to be sure.
+
+
+## Configuration
+
+To enable the basic use of PostgreSQL to manage workflow metadata, set the following properties:
+
+```properties
+conductor.db.type=postgres
+spring.datasource.url=jdbc:postgresql://postgres:5432/conductor
+spring.datasource.username=conductor
+spring.datasource.password=password
+# optional
+conductor.postgres.schema=public
+```
+
+To also use PostgreSQL for queues, you can set:
+
+```properties
+conductor.queue.type=postgres
+```
+
+You can also use PostgreSQL to index workflows; configure this as follows:
+
+```properties
+conductor.indexing.enabled=true
+conductor.indexing.type=postgres
+conductor.elasticsearch.version=0
+```
+
+To use PostgreSQL for locking, set the following properties:
+```properties
+conductor.app.workflowExecutionLockEnabled=true
+conductor.workflow-execution-lock.type=postgres
+```
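+
+Putting these together, a sketch of an all-PostgreSQL configuration (the connection details are placeholders; adjust them for your environment):
+
+```properties
+conductor.db.type=postgres
+conductor.queue.type=postgres
+conductor.indexing.enabled=true
+conductor.indexing.type=postgres
+conductor.elasticsearch.version=0
+conductor.app.workflowExecutionLockEnabled=true
+conductor.workflow-execution-lock.type=postgres
+conductor.postgres.schema=public
+spring.datasource.url=jdbc:postgresql://postgres:5432/conductor
+spring.datasource.username=conductor
+spring.datasource.password=password
+```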
+
+## Performance Optimisations
+
+### Poll Data caching
+
+By default, Conductor writes the latest poll for tasks to the database so that it can be used to determine which tasks and domains are active. This creates a lot of database traffic.
+To avoid some of this traffic, you can configure the PollDataDAO with a write buffer so that it only flushes every x milliseconds. If you keep this value around 5s, there should be no impact on behaviour. Conductor uses a default duration of 10s to determine whether a queue for a domain is active (also configurable using `conductor.app.activeWorkerLastPollTimeout`), so this leaves plenty of time for the data to reach the database and be shared with other instances:
+
+```properties
+# Flush the data every 5 seconds
+conductor.postgres.pollDataFlushInterval=5000
+```
+
+You can also configure a duration after which the cached poll data is considered stale. The PollDataDAO will try to use the cached data, but if it is older than the configured period, it will check against the database. There is no downside to setting this: if this Conductor node can already confirm that the queue is active, there is no need to go to the database, and if the cached record is out of date, Conductor still checks the database.
+
+```properties
+# Data older than 5 seconds is considered stale
+conductor.postgres.pollDataCacheValidityPeriod=5000
+```
+
+### Workflow and Task indexing on status change
+
+If you have a workflow with many tasks, Conductor will index that workflow every time a task completes, which can result in a lot of extra load on the database. By setting this parameter, you can configure Conductor to only index the workflow when its status changes:
+
+```properties
+conductor.postgres.onlyIndexOnStatusChange=true
+```
+
+### Control over what gets indexed
+
+By default, Conductor indexes both workflows and tasks to enable searching via the UI. If you only search for workflows and never for tasks, you can disable task indexing with the following option:
+
+```properties
+conductor.app.taskIndexingEnabled=false
+```
+
+### Experimental LISTEN/NOTIFY based queues
+
+By default, Conductor will query the queues in the database 10 times per second for every task, which can result in a lot of traffic.
+By enabling this option, Conductor instead uses [LISTEN](https://www.postgresql.org/docs/current/sql-listen.html)/[NOTIFY](https://www.postgresql.org/docs/current/sql-notify.html) triggers to distribute metadata about the state of the queues to all of the Conductor servers. This drastically reduces the load on the database because a single message containing the state of the queues is sent to all subscribers.
+Enable it as follows:
+
+```properties
+conductor.postgres.experimentalQueueNotify=true
+```
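+
+For background on the mechanism itself, here is a standalone pgjdbc sketch of LISTEN/NOTIFY (an illustration of the PostgreSQL feature, not Conductor's internal implementation; the channel name `conductor_queues` and connection details are made up):
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+
+import org.postgresql.PGConnection;
+import org.postgresql.PGNotification;
+
+public class ListenNotifyDemo {
+    public static void main(String[] args) throws Exception {
+        Connection conn =
+                DriverManager.getConnection(
+                        "jdbc:postgresql://postgres:5432/conductor", "conductor", "password");
+
+        // Subscribe this connection to a channel; the server pushes NOTIFYs sent to it
+        try (Statement stmt = conn.createStatement()) {
+            stmt.execute("LISTEN conductor_queues");
+        }
+
+        // Another session (or a trigger) publishes with: NOTIFY conductor_queues, 'payload'
+        PGConnection pgConn = conn.unwrap(PGConnection.class);
+        PGNotification[] notifications = pgConn.getNotifications(10_000); // block up to 10s
+        if (notifications != null) {
+            for (PGNotification n : notifications) {
+                System.out.println(n.getName() + ": " + n.getParameter());
+            }
+        }
+        conn.close();
+    }
+}
+```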
+
+You can also configure how long Conductor will wait before considering a notification stale using the following property:
+
+```properties
+# Data older than 5 seconds is considered stale
+conductor.postgres.experimentalQueueNotifyStalePeriod=5000
+```
diff --git a/docs/documentation/advanced/redis.md b/docs/documentation/advanced/redis.md
index ee96c6c08..c0c329f99 100644
--- a/docs/documentation/advanced/redis.md
+++ b/docs/documentation/advanced/redis.md
@@ -1,9 +1,9 @@
# Redis
By default conductor runs with an in-memory Redis mock. However, you
-can change the configuration by setting the properties `conductor.db.type` and `conductor.redis.hosts`.
+can change the configuration by setting the properties mentioned below.
-## `conductor.db.type`
+## `conductor.db.type` and `conductor.queue.type`
| Value | Description |
|--------------------------------|----------------------------------------------------------------------------------------|
@@ -13,8 +13,6 @@ can change the configuration by setting the properties `conductor.db.type` and `
| redis_sentinel | Redis Sentinel configuration. |
| redis_standalone | Redis Standalone configuration. |
-
-
## `conductor.redis.hosts`
Expected format is `host:port:rack` separated by semicolon, e.g.:
@@ -23,16 +21,32 @@ Expected format is `host:port:rack` separated by semicolon, e.g.:
conductor.redis.hosts=host0:6379:us-east-1c;host1:6379:us-east-1c;host2:6379:us-east-1c
```
-### Auth Support
+## `conductor.redis.database`
+A Redis database index other than the default of 0 is supported in the sentinel and standalone configurations.
+Redis cluster mode only uses database 0, so this property is ignored there.
+
+```properties
+conductor.redis.database=1
+```
+
-Password authentication is supported. The password should be set as the 4th param of the first host `host:port:rack:password`, e.g.:
+## `conductor.redis.username`
+[Redis ACL](https://redis.io/docs/management/security/acl/) authentication with a username and password is now supported.
+
+Set the username via the `conductor.redis.username` property, e.g.:
```properties
-conductor.redis.hosts=host0:6379:us-east-1c:my_str0ng_pazz;host1:6379:us-east-1c;host2:6379:us-east-1c
+conductor.redis.username=conductor
```
+If not set, the client uses `default` as the username.
+
+The password should be set as the fourth parameter of the first host, `host:port:rack:password`, e.g.:
+```properties
+conductor.redis.hosts=host0:6379:us-east-1c:my_str0ng_pazz;host1:6379:us-east-1c;host2:6379:us-east-1c
+```
**Notes**
-- In a cluster, all nodes use the same password.
-- In a sentinel configuration, sentinels and redis nodes use the same password.
+- In a cluster, all nodes use the same username and password.
+- In a sentinel configuration, sentinels and redis nodes use the same database index, username, and password.
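+
+For illustration, a standalone Redis configuration combining these properties might look like the following (host, rack, and credentials are placeholders):
+
+```properties
+conductor.db.type=redis_standalone
+conductor.queue.type=redis_standalone
+conductor.redis.hosts=redis-host:6379:us-east-1c:my_str0ng_pazz
+conductor.redis.database=1
+conductor.redis.username=conductor
+```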
diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java
index c5159839c..f1f67cf74 100644
--- a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java
+++ b/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java
@@ -46,15 +46,7 @@ public class ElasticSearchV7Configuration {
private static final Logger log = LoggerFactory.getLogger(ElasticSearchV7Configuration.class);
@Bean
- public RestClient restClient(ElasticSearchProperties properties) {
- RestClientBuilder restClientBuilder =
- RestClient.builder(convertToHttpHosts(properties.toURLs()));
- if (properties.getRestClientConnectionRequestTimeout() > 0) {
- restClientBuilder.setRequestConfigCallback(
- requestConfigBuilder ->
- requestConfigBuilder.setConnectionRequestTimeout(
- properties.getRestClientConnectionRequestTimeout()));
- }
+ public RestClient restClient(RestClientBuilder restClientBuilder) {
return restClientBuilder.build();
}
@@ -62,6 +54,13 @@ public RestClient restClient(ElasticSearchProperties properties) {
public RestClientBuilder elasticRestClientBuilder(ElasticSearchProperties properties) {
RestClientBuilder builder = RestClient.builder(convertToHttpHosts(properties.toURLs()));
+ if (properties.getRestClientConnectionRequestTimeout() > 0) {
+ builder.setRequestConfigCallback(
+ requestConfigBuilder ->
+ requestConfigBuilder.setConnectionRequestTimeout(
+ properties.getRestClientConnectionRequestTimeout()));
+ }
+
if (properties.getUsername() != null && properties.getPassword() != null) {
log.info(
"Configure ElasticSearch with BASIC authentication. User:{}",
diff --git a/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java b/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java
index b6d3a3447..47ccc9d8b 100644
--- a/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java
+++ b/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java
@@ -2,6 +2,7 @@
import com.google.protobuf.Any;
import com.google.protobuf.Value;
+import com.netflix.conductor.common.metadata.SchemaDef;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.PollData;
@@ -11,10 +12,13 @@
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTask;
import com.netflix.conductor.common.metadata.workflow.DynamicForkJoinTaskList;
+import com.netflix.conductor.common.metadata.workflow.RateLimitConfig;
import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.SkipTaskRequest;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
+import com.netflix.conductor.common.metadata.workflow.StateChangeEvent;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
+import com.netflix.conductor.common.metadata.workflow.UpgradeWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDefSummary;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
@@ -26,15 +30,19 @@
import com.netflix.conductor.proto.EventExecutionPb;
import com.netflix.conductor.proto.EventHandlerPb;
import com.netflix.conductor.proto.PollDataPb;
+import com.netflix.conductor.proto.RateLimitConfigPb;
import com.netflix.conductor.proto.RerunWorkflowRequestPb;
+import com.netflix.conductor.proto.SchemaDefPb;
import com.netflix.conductor.proto.SkipTaskRequestPb;
import com.netflix.conductor.proto.StartWorkflowRequestPb;
+import com.netflix.conductor.proto.StateChangeEventPb;
import com.netflix.conductor.proto.SubWorkflowParamsPb;
import com.netflix.conductor.proto.TaskDefPb;
import com.netflix.conductor.proto.TaskExecLogPb;
import com.netflix.conductor.proto.TaskPb;
import com.netflix.conductor.proto.TaskResultPb;
import com.netflix.conductor.proto.TaskSummaryPb;
+import com.netflix.conductor.proto.UpgradeWorkflowRequestPb;
import com.netflix.conductor.proto.WorkflowDefPb;
import com.netflix.conductor.proto.WorkflowDefSummaryPb;
import com.netflix.conductor.proto.WorkflowPb;
@@ -202,6 +210,54 @@ public EventHandler fromProto(EventHandlerPb.EventHandler from) {
return to;
}
+ public EventHandlerPb.EventHandler.UpdateWorkflowVariables toProto(
+ EventHandler.UpdateWorkflowVariables from) {
+ EventHandlerPb.EventHandler.UpdateWorkflowVariables.Builder to = EventHandlerPb.EventHandler.UpdateWorkflowVariables.newBuilder();
+ if (from.getWorkflowId() != null) {
+ to.setWorkflowId( from.getWorkflowId() );
+ }
+ for (Map.Entry<String, Object> pair : from.getVariables().entrySet()) {
+ to.putVariables( pair.getKey(), toProto( pair.getValue() ) );
+ }
+ if (from.isAppendArray() != null) {
+ to.setAppendArray( from.isAppendArray() );
+ }
+ return to.build();
+ }
+
+ public EventHandler.UpdateWorkflowVariables fromProto(
+ EventHandlerPb.EventHandler.UpdateWorkflowVariables from) {
+ EventHandler.UpdateWorkflowVariables to = new EventHandler.UpdateWorkflowVariables();
+ to.setWorkflowId( from.getWorkflowId() );
+ Map<String, Object> variablesMap = new HashMap<String, Object>();
+ for (Map.Entry<String, Value> pair : from.getVariablesMap().entrySet()) {
+ variablesMap.put( pair.getKey(), fromProto( pair.getValue() ) );
+ }
+ to.setVariables(variablesMap);
+ to.setAppendArray( from.getAppendArray() );
+ return to;
+ }
+
+ public EventHandlerPb.EventHandler.TerminateWorkflow toProto(
+ EventHandler.TerminateWorkflow from) {
+ EventHandlerPb.EventHandler.TerminateWorkflow.Builder to = EventHandlerPb.EventHandler.TerminateWorkflow.newBuilder();
+ if (from.getWorkflowId() != null) {
+ to.setWorkflowId( from.getWorkflowId() );
+ }
+ if (from.getTerminationReason() != null) {
+ to.setTerminationReason( from.getTerminationReason() );
+ }
+ return to.build();
+ }
+
+ public EventHandler.TerminateWorkflow fromProto(
+ EventHandlerPb.EventHandler.TerminateWorkflow from) {
+ EventHandler.TerminateWorkflow to = new EventHandler.TerminateWorkflow();
+ to.setWorkflowId( from.getWorkflowId() );
+ to.setTerminationReason( from.getTerminationReason() );
+ return to;
+ }
+
public EventHandlerPb.EventHandler.StartWorkflow toProto(EventHandler.StartWorkflow from) {
EventHandlerPb.EventHandler.StartWorkflow.Builder to = EventHandlerPb.EventHandler.StartWorkflow.newBuilder();
if (from.getName() != null) {
@@ -291,6 +347,12 @@ public EventHandlerPb.EventHandler.Action toProto(EventHandler.Action from) {
to.setFailTask( toProto( from.getFail_task() ) );
}
to.setExpandInlineJson( from.isExpandInlineJSON() );
+ if (from.getTerminate_workflow() != null) {
+ to.setTerminateWorkflow( toProto( from.getTerminate_workflow() ) );
+ }
+ if (from.getUpdate_workflow_variables() != null) {
+ to.setUpdateWorkflowVariables( toProto( from.getUpdate_workflow_variables() ) );
+ }
return to.build();
}
@@ -307,6 +369,12 @@ public EventHandler.Action fromProto(EventHandlerPb.EventHandler.Action from) {
to.setFail_task( fromProto( from.getFailTask() ) );
}
to.setExpandInlineJSON( from.getExpandInlineJson() );
+ if (from.hasTerminateWorkflow()) {
+ to.setTerminate_workflow( fromProto( from.getTerminateWorkflow() ) );
+ }
+ if (from.hasUpdateWorkflowVariables()) {
+ to.setUpdate_workflow_variables( fromProto( from.getUpdateWorkflowVariables() ) );
+ }
return to;
}
@@ -316,6 +384,8 @@ public EventHandlerPb.EventHandler.Action.Type toProto(EventHandler.Action.Type
case start_workflow: to = EventHandlerPb.EventHandler.Action.Type.START_WORKFLOW; break;
case complete_task: to = EventHandlerPb.EventHandler.Action.Type.COMPLETE_TASK; break;
case fail_task: to = EventHandlerPb.EventHandler.Action.Type.FAIL_TASK; break;
+ case terminate_workflow: to = EventHandlerPb.EventHandler.Action.Type.TERMINATE_WORKFLOW; break;
+ case update_workflow_variables: to = EventHandlerPb.EventHandler.Action.Type.UPDATE_WORKFLOW_VARIABLES; break;
default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
}
return to;
@@ -327,6 +397,8 @@ public EventHandler.Action.Type fromProto(EventHandlerPb.EventHandler.Action.Typ
case START_WORKFLOW: to = EventHandler.Action.Type.start_workflow; break;
case COMPLETE_TASK: to = EventHandler.Action.Type.complete_task; break;
case FAIL_TASK: to = EventHandler.Action.Type.fail_task; break;
+ case TERMINATE_WORKFLOW: to = EventHandler.Action.Type.terminate_workflow; break;
+ case UPDATE_WORKFLOW_VARIABLES: to = EventHandler.Action.Type.update_workflow_variables; break;
default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
}
return to;
@@ -356,6 +428,22 @@ public PollData fromProto(PollDataPb.PollData from) {
return to;
}
+ public RateLimitConfigPb.RateLimitConfig toProto(RateLimitConfig from) {
+ RateLimitConfigPb.RateLimitConfig.Builder to = RateLimitConfigPb.RateLimitConfig.newBuilder();
+ if (from.getRateLimitKey() != null) {
+ to.setRateLimitKey( from.getRateLimitKey() );
+ }
+ to.setConcurrentExecLimit( from.getConcurrentExecLimit() );
+ return to.build();
+ }
+
+ public RateLimitConfig fromProto(RateLimitConfigPb.RateLimitConfig from) {
+ RateLimitConfig to = new RateLimitConfig();
+ to.setRateLimitKey( from.getRateLimitKey() );
+ to.setConcurrentExecLimit( from.getConcurrentExecLimit() );
+ return to;
+ }
+
public RerunWorkflowRequestPb.RerunWorkflowRequest toProto(RerunWorkflowRequest from) {
RerunWorkflowRequestPb.RerunWorkflowRequest.Builder to = RerunWorkflowRequestPb.RerunWorkflowRequest.newBuilder();
if (from.getReRunFromWorkflowId() != null) {
@@ -394,6 +482,48 @@ public RerunWorkflowRequest fromProto(RerunWorkflowRequestPb.RerunWorkflowReques
return to;
}
+ public SchemaDefPb.SchemaDef toProto(SchemaDef from) {
+ SchemaDefPb.SchemaDef.Builder to = SchemaDefPb.SchemaDef.newBuilder();
+ if (from.getName() != null) {
+ to.setName( from.getName() );
+ }
+ to.setVersion( from.getVersion() );
+ if (from.getType() != null) {
+ to.setType( toProto( from.getType() ) );
+ }
+ return to.build();
+ }
+
+ public SchemaDef fromProto(SchemaDefPb.SchemaDef from) {
+ SchemaDef to = new SchemaDef();
+ to.setName( from.getName() );
+ to.setVersion( from.getVersion() );
+ to.setType( fromProto( from.getType() ) );
+ return to;
+ }
+
+ public SchemaDefPb.SchemaDef.Type toProto(SchemaDef.Type from) {
+ SchemaDefPb.SchemaDef.Type to;
+ switch (from) {
+ case JSON: to = SchemaDefPb.SchemaDef.Type.JSON; break;
+ case AVRO: to = SchemaDefPb.SchemaDef.Type.AVRO; break;
+ case PROTOBUF: to = SchemaDefPb.SchemaDef.Type.PROTOBUF; break;
+ default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
+ }
+ return to;
+ }
+
+ public SchemaDef.Type fromProto(SchemaDefPb.SchemaDef.Type from) {
+ SchemaDef.Type to;
+ switch (from) {
+ case JSON: to = SchemaDef.Type.JSON; break;
+ case AVRO: to = SchemaDef.Type.AVRO; break;
+ case PROTOBUF: to = SchemaDef.Type.PROTOBUF; break;
+ default: throw new IllegalArgumentException("Unexpected enum constant: " + from);
+ }
+ return to;
+ }
+
public SkipTaskRequest fromProto(SkipTaskRequestPb.SkipTaskRequest from) {
SkipTaskRequest to = new SkipTaskRequest();
Map<String, Object> taskInputMap = new HashMap<String, Object>();
@@ -439,6 +569,9 @@ public StartWorkflowRequestPb.StartWorkflowRequest toProto(StartWorkflowRequest
if (from.getPriority() != null) {
to.setPriority( from.getPriority() );
}
+ if (from.getCreatedBy() != null) {
+ to.setCreatedBy( from.getCreatedBy() );
+ }
return to.build();
}
@@ -458,6 +591,29 @@ public StartWorkflowRequest fromProto(StartWorkflowRequestPb.StartWorkflowReques
}
to.setExternalInputPayloadStoragePath( from.getExternalInputPayloadStoragePath() );
to.setPriority( from.getPriority() );
+ to.setCreatedBy( from.getCreatedBy() );
+ return to;
+ }
+
+ public StateChangeEventPb.StateChangeEvent toProto(StateChangeEvent from) {
+ StateChangeEventPb.StateChangeEvent.Builder to = StateChangeEventPb.StateChangeEvent.newBuilder();
+ if (from.getType() != null) {
+ to.setType( from.getType() );
+ }
+ for (Map.Entry<String, Object> pair : from.getPayload().entrySet()) {
+ to.putPayload( pair.getKey(), toProto( pair.getValue() ) );
+ }
+ return to.build();
+ }
+
+ public StateChangeEvent fromProto(StateChangeEventPb.StateChangeEvent from) {
+ StateChangeEvent to = new StateChangeEvent();
+ to.setType( from.getType() );
+ Map<String, Object> payloadMap = new HashMap<String, Object>();
+ for (Map.Entry<String, Value> pair : from.getPayloadMap().entrySet()) {
+ payloadMap.put( pair.getKey(), fromProto( pair.getValue() ) );
+ }
+ to.setPayload(payloadMap);
return to;
}
@@ -716,6 +872,9 @@ public TaskDefPb.TaskDef toProto(TaskDef from) {
if (from.getBackoffScaleFactor() != null) {
to.setBackoffScaleFactor( from.getBackoffScaleFactor() );
}
+ if (from.getBaseType() != null) {
+ to.setBaseType( from.getBaseType() );
+ }
return to.build();
}
@@ -744,6 +903,7 @@ public TaskDef fromProto(TaskDefPb.TaskDef from) {
to.setOwnerEmail( from.getOwnerEmail() );
to.setPollTimeoutSeconds( from.getPollTimeoutSeconds() );
to.setBackoffScaleFactor( from.getBackoffScaleFactor() );
+ to.setBaseType( from.getBaseType() );
return to;
}
@@ -965,6 +1125,40 @@ public TaskSummary fromProto(TaskSummaryPb.TaskSummary from) {
return to;
}
+ public UpgradeWorkflowRequestPb.UpgradeWorkflowRequest toProto(UpgradeWorkflowRequest from) {
+ UpgradeWorkflowRequestPb.UpgradeWorkflowRequest.Builder to = UpgradeWorkflowRequestPb.UpgradeWorkflowRequest.newBuilder();
+ for (Map.Entry<String, Object> pair : from.getTaskOutput().entrySet()) {
+ to.putTaskOutput( pair.getKey(), toProto( pair.getValue() ) );
+ }
+ for (Map.Entry<String, Object> pair : from.getWorkflowInput().entrySet()) {
+ to.putWorkflowInput( pair.getKey(), toProto( pair.getValue() ) );
+ }
+ if (from.getVersion() != null) {
+ to.setVersion( from.getVersion() );
+ }
+ if (from.getName() != null) {
+ to.setName( from.getName() );
+ }
+ return to.build();
+ }
+
+ public UpgradeWorkflowRequest fromProto(UpgradeWorkflowRequestPb.UpgradeWorkflowRequest from) {
+ UpgradeWorkflowRequest to = new UpgradeWorkflowRequest();
+ Map<String, Object> taskOutputMap = new HashMap<String, Object>();
+ for (Map.Entry<String, Value> pair : from.getTaskOutputMap().entrySet()) {
+ taskOutputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
+ }
+ to.setTaskOutput(taskOutputMap);
+ Map<String, Object> workflowInputMap = new HashMap<String, Object>();
+ for (Map.Entry<String, Value> pair : from.getWorkflowInputMap().entrySet()) {
+ workflowInputMap.put( pair.getKey(), fromProto( pair.getValue() ) );
+ }
+ to.setWorkflowInput(workflowInputMap);
+ to.setVersion( from.getVersion() );
+ to.setName( from.getName() );
+ return to;
+ }
+
public WorkflowPb.Workflow toProto(Workflow from) {
WorkflowPb.Workflow.Builder to = WorkflowPb.Workflow.newBuilder();
if (from.getStatus() != null) {
@@ -1018,6 +1212,9 @@ public WorkflowPb.Workflow toProto(Workflow from) {
}
to.setLastRetriedTime( from.getLastRetriedTime() );
to.addAllFailedTaskNames( from.getFailedTaskNames() );
+ for (Workflow elem : from.getHistory()) {
+ to.addHistory( toProto(elem) );
+ }
return to.build();
}
@@ -1058,6 +1255,7 @@ public Workflow fromProto(WorkflowPb.Workflow from) {
to.setVariables(variablesMap);
to.setLastRetriedTime( from.getLastRetriedTime() );
to.setFailedTaskNames( from.getFailedTaskNamesList().stream().collect(Collectors.toCollection(HashSet::new)) );
+ to.setHistory( from.getHistoryList().stream().map(this::fromProto).collect(Collectors.toCollection(ArrayList::new)) );
return to;
}
@@ -1124,6 +1322,19 @@ public WorkflowDefPb.WorkflowDef toProto(WorkflowDef from) {
for (Map.Entry<String, Object> pair : from.getInputTemplate().entrySet()) {
to.putInputTemplate( pair.getKey(), toProto( pair.getValue() ) );
}
+ if (from.getWorkflowStatusListenerSink() != null) {
+ to.setWorkflowStatusListenerSink( from.getWorkflowStatusListenerSink() );
+ }
+ if (from.getRateLimitConfig() != null) {
+ to.setRateLimitConfig( toProto( from.getRateLimitConfig() ) );
+ }
+ if (from.getInputSchema() != null) {
+ to.setInputSchema( toProto( from.getInputSchema() ) );
+ }
+ if (from.getOutputSchema() != null) {
+ to.setOutputSchema( toProto( from.getOutputSchema() ) );
+ }
+ to.setEnforceSchema( from.isEnforceSchema() );
return to.build();
}
@@ -1156,6 +1367,17 @@ public WorkflowDef fromProto(WorkflowDefPb.WorkflowDef from) {
inputTemplateMap.put( pair.getKey(), fromProto( pair.getValue() ) );
}
to.setInputTemplate(inputTemplateMap);
+ to.setWorkflowStatusListenerSink( from.getWorkflowStatusListenerSink() );
+ if (from.hasRateLimitConfig()) {
+ to.setRateLimitConfig( fromProto( from.getRateLimitConfig() ) );
+ }
+ if (from.hasInputSchema()) {
+ to.setInputSchema( fromProto( from.getInputSchema() ) );
+ }
+ if (from.hasOutputSchema()) {
+ to.setOutputSchema( fromProto( from.getOutputSchema() ) );
+ }
+ to.setEnforceSchema( from.getEnforceSchema() );
return to;
}
@@ -1247,6 +1469,9 @@ public WorkflowSummaryPb.WorkflowSummary toProto(WorkflowSummary from) {
}
to.setPriority( from.getPriority() );
to.addAllFailedTaskNames( from.getFailedTaskNames() );
+ if (from.getCreatedBy() != null) {
+ to.setCreatedBy( from.getCreatedBy() );
+ }
return to.build();
}
@@ -1270,6 +1495,7 @@ public WorkflowSummary fromProto(WorkflowSummaryPb.WorkflowSummary from) {
to.setExternalOutputPayloadStoragePath( from.getExternalOutputPayloadStoragePath() );
to.setPriority( from.getPriority() );
to.setFailedTaskNames( from.getFailedTaskNamesList().stream().collect(Collectors.toCollection(HashSet::new)) );
+ to.setCreatedBy( from.getCreatedBy() );
return to;
}
@@ -1351,6 +1577,12 @@ public WorkflowTaskPb.WorkflowTask toProto(WorkflowTask from) {
if (from.getExpression() != null) {
to.setExpression( from.getExpression() );
}
+ if (from.getJoinStatus() != null) {
+ to.setJoinStatus( from.getJoinStatus() );
+ }
+ if (from.getCacheConfig() != null) {
+ to.setCacheConfig( toProto( from.getCacheConfig() ) );
+ }
to.setPermissive( from.isPermissive() );
return to.build();
}
@@ -1397,10 +1629,30 @@ public WorkflowTask fromProto(WorkflowTaskPb.WorkflowTask from) {
to.setRetryCount( from.getRetryCount() );
to.setEvaluatorType( from.getEvaluatorType() );
to.setExpression( from.getExpression() );
+ to.setJoinStatus( from.getJoinStatus() );
+ if (from.hasCacheConfig()) {
+ to.setCacheConfig( fromProto( from.getCacheConfig() ) );
+ }
to.setPermissive( from.getPermissive() );
return to;
}
+ public WorkflowTaskPb.WorkflowTask.CacheConfig toProto(WorkflowTask.CacheConfig from) {
+ WorkflowTaskPb.WorkflowTask.CacheConfig.Builder to = WorkflowTaskPb.WorkflowTask.CacheConfig.newBuilder();
+ if (from.getKey() != null) {
+ to.setKey( from.getKey() );
+ }
+ to.setTtlInSecond( from.getTtlInSecond() );
+ return to.build();
+ }
+
+ public WorkflowTask.CacheConfig fromProto(WorkflowTaskPb.WorkflowTask.CacheConfig from) {
+ WorkflowTask.CacheConfig to = new WorkflowTask.CacheConfig();
+ to.setKey( from.getKey() );
+ to.setTtlInSecond( from.getTtlInSecond() );
+ return to;
+ }
+
public abstract WorkflowTaskPb.WorkflowTask.WorkflowTaskList toProto(List<WorkflowTask> in);
public abstract List<WorkflowTask> fromProto(WorkflowTaskPb.WorkflowTask.WorkflowTaskList in);
diff --git a/grpc/src/main/proto/model/eventhandler.proto b/grpc/src/main/proto/model/eventhandler.proto
index cfc623b53..8806bb6ca 100644
--- a/grpc/src/main/proto/model/eventhandler.proto
+++ b/grpc/src/main/proto/model/eventhandler.proto
@@ -9,6 +9,15 @@ option java_outer_classname = "EventHandlerPb";
option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model";
message EventHandler {
+ message UpdateWorkflowVariables {
+ string workflow_id = 1;
+ map<string, google.protobuf.Value> variables = 2;
+ bool append_array = 3;
+ }
+ message TerminateWorkflow {
+ string workflow_id = 1;
+ string termination_reason = 2;
+ }
message StartWorkflow {
string name = 1;
int32 version = 2;
@@ -29,12 +38,16 @@ message EventHandler {
START_WORKFLOW = 0;
COMPLETE_TASK = 1;
FAIL_TASK = 2;
+ TERMINATE_WORKFLOW = 3;
+ UPDATE_WORKFLOW_VARIABLES = 4;
}
EventHandler.Action.Type action = 1;
EventHandler.StartWorkflow start_workflow = 2;
EventHandler.TaskDetails complete_task = 3;
EventHandler.TaskDetails fail_task = 4;
bool expand_inline_json = 5;
+ EventHandler.TerminateWorkflow terminate_workflow = 6;
+ EventHandler.UpdateWorkflowVariables update_workflow_variables = 7;
}
string name = 1;
string event = 2;
diff --git a/grpc/src/main/proto/model/ratelimitconfig.proto b/grpc/src/main/proto/model/ratelimitconfig.proto
new file mode 100644
index 000000000..96a6ab357
--- /dev/null
+++ b/grpc/src/main/proto/model/ratelimitconfig.proto
@@ -0,0 +1,12 @@
+syntax = "proto3";
+package conductor.proto;
+
+
+option java_package = "com.netflix.conductor.proto";
+option java_outer_classname = "RateLimitConfigPb";
+option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model";
+
+message RateLimitConfig {
+ string rate_limit_key = 1;
+ int32 concurrent_exec_limit = 2;
+}
diff --git a/grpc/src/main/proto/model/schemadef.proto b/grpc/src/main/proto/model/schemadef.proto
new file mode 100644
index 000000000..58583bdc2
--- /dev/null
+++ b/grpc/src/main/proto/model/schemadef.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+package conductor.proto;
+
+
+option java_package = "com.netflix.conductor.proto";
+option java_outer_classname = "SchemaDefPb";
+option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model";
+
+message SchemaDef {
+ enum Type {
+ JSON = 0;
+ AVRO = 1;
+ PROTOBUF = 2;
+ }
+ string name = 1;
+ int32 version = 2;
+ SchemaDef.Type type = 3;
+}
diff --git a/grpc/src/main/proto/model/startworkflowrequest.proto b/grpc/src/main/proto/model/startworkflowrequest.proto
index 4a71f28ed..73d8d3c23 100644
--- a/grpc/src/main/proto/model/startworkflowrequest.proto
+++ b/grpc/src/main/proto/model/startworkflowrequest.proto
@@ -17,4 +17,5 @@ message StartWorkflowRequest {
WorkflowDef workflow_def = 6;
string external_input_payload_storage_path = 7;
int32 priority = 8;
+ string created_by = 9;
}
diff --git a/grpc/src/main/proto/model/statechangeevent.proto b/grpc/src/main/proto/model/statechangeevent.proto
new file mode 100644
index 000000000..57660ea7b
--- /dev/null
+++ b/grpc/src/main/proto/model/statechangeevent.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+package conductor.proto;
+
+import "google/protobuf/struct.proto";
+
+option java_package = "com.netflix.conductor.proto";
+option java_outer_classname = "StateChangeEventPb";
+option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model";
+
+message StateChangeEvent {
+ string type = 1;
+ map<string, google.protobuf.Value> payload = 2;
+}
diff --git a/grpc/src/main/proto/model/taskdef.proto b/grpc/src/main/proto/model/taskdef.proto
index 43c086c9e..e531bcfec 100644
--- a/grpc/src/main/proto/model/taskdef.proto
+++ b/grpc/src/main/proto/model/taskdef.proto
@@ -37,4 +37,5 @@ message TaskDef {
string owner_email = 18;
int32 poll_timeout_seconds = 19;
int32 backoff_scale_factor = 20;
+ string base_type = 21;
}
diff --git a/grpc/src/main/proto/model/upgradeworkflowrequest.proto b/grpc/src/main/proto/model/upgradeworkflowrequest.proto
new file mode 100644
index 000000000..f9ebcf890
--- /dev/null
+++ b/grpc/src/main/proto/model/upgradeworkflowrequest.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+package conductor.proto;
+
+import "google/protobuf/struct.proto";
+
+option java_package = "com.netflix.conductor.proto";
+option java_outer_classname = "UpgradeWorkflowRequestPb";
+option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model";
+
+message UpgradeWorkflowRequest {
+ map<string, google.protobuf.Value> task_output = 4;
+ map<string, google.protobuf.Value> workflow_input = 3;
+ int32 version = 2;
+ string name = 1;
+}
diff --git a/grpc/src/main/proto/model/workflow.proto b/grpc/src/main/proto/model/workflow.proto
index 4c1488aa3..d623a2dce 100644
--- a/grpc/src/main/proto/model/workflow.proto
+++ b/grpc/src/main/proto/model/workflow.proto
@@ -39,4 +39,5 @@ message Workflow {
map<string, google.protobuf.Value> variables = 23;
int64 last_retried_time = 24;
repeated string failed_task_names = 25;
+ repeated Workflow history = 26;
}
diff --git a/grpc/src/main/proto/model/workflowdef.proto b/grpc/src/main/proto/model/workflowdef.proto
index ddf75e38a..c98c6cb25 100644
--- a/grpc/src/main/proto/model/workflowdef.proto
+++ b/grpc/src/main/proto/model/workflowdef.proto
@@ -1,8 +1,10 @@
syntax = "proto3";
package conductor.proto;
+import "model/ratelimitconfig.proto";
import "model/workflowtask.proto";
import "google/protobuf/struct.proto";
+import "model/schemadef.proto";
option java_package = "com.netflix.conductor.proto";
option java_outer_classname = "WorkflowDefPb";
@@ -28,4 +30,9 @@ message WorkflowDef {
int64 timeout_seconds = 13;
map<string, google.protobuf.Value> variables = 14;
map<string, google.protobuf.Value> input_template = 15;
+ string workflow_status_listener_sink = 17;
+ RateLimitConfig rate_limit_config = 18;
+ SchemaDef input_schema = 19;
+ SchemaDef output_schema = 20;
+ bool enforce_schema = 21;
}
diff --git a/grpc/src/main/proto/model/workflowsummary.proto b/grpc/src/main/proto/model/workflowsummary.proto
index 7b0e3f652..c48338066 100644
--- a/grpc/src/main/proto/model/workflowsummary.proto
+++ b/grpc/src/main/proto/model/workflowsummary.proto
@@ -26,4 +26,5 @@ message WorkflowSummary {
string external_output_payload_storage_path = 16;
int32 priority = 17;
repeated string failed_task_names = 18;
+ string created_by = 19;
}
diff --git a/grpc/src/main/proto/model/workflowtask.proto b/grpc/src/main/proto/model/workflowtask.proto
index 2c35d56dd..0bee4ce44 100644
--- a/grpc/src/main/proto/model/workflowtask.proto
+++ b/grpc/src/main/proto/model/workflowtask.proto
@@ -10,6 +10,10 @@ option java_outer_classname = "WorkflowTaskPb";
option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model";
message WorkflowTask {
+ message CacheConfig {
+ string key = 1;
+ int32 ttl_in_second = 2;
+ }
message WorkflowTaskList {
repeated WorkflowTask tasks = 1;
}
@@ -41,5 +45,7 @@ message WorkflowTask {
int32 retry_count = 26;
string evaluator_type = 27;
string expression = 28;
- bool permissive = 29;
+ string join_status = 30;
+ WorkflowTask.CacheConfig cache_config = 31;
+ bool permissive = 32;
}
diff --git a/java-sdk/src/main/java/com/netflix/conductor/sdk/workflow/def/tasks/Javascript.java b/java-sdk/src/main/java/com/netflix/conductor/sdk/workflow/def/tasks/Javascript.java
index 7260a0586..d6d4af2ba 100644
--- a/java-sdk/src/main/java/com/netflix/conductor/sdk/workflow/def/tasks/Javascript.java
+++ b/java-sdk/src/main/java/com/netflix/conductor/sdk/workflow/def/tasks/Javascript.java
@@ -22,6 +22,7 @@
import javax.script.ScriptEngineManager;
import javax.script.ScriptException;
+import org.openjdk.nashorn.api.scripting.NashornScriptEngineFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -49,7 +50,8 @@ public class Javascript extends Task {
* Javascript tasks are executed on the Conductor server without having to write worker code
*
* Use {@link Javascript#validate()} method to validate the javascript to ensure the script
- * is valid.
+ * is valid. Set the environment variable CONDUCTOR_NASHORN_ES6_ENABLED=true to enable Nashorn
+ * ES6 support during validation.
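+ *
+ * <p>A usage sketch (assumes the variable is exported before the JVM starts; the task name and
+ * script below are illustrative):
+ * <pre>{@code
+ * Javascript js = new Javascript("compute_total", "$.price * $.quantity");
+ * js.validate();
+ * }</pre>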
*
* @param taskReferenceName
* @param script script to execute
@@ -100,7 +102,13 @@ public String getExpression() {
* @return
*/
public Javascript validate() {
- ScriptEngine scriptEngine = new ScriptEngineManager().getEngineByName("Nashorn");
+ ScriptEngine scriptEngine;
+ if ("true".equalsIgnoreCase(System.getenv("CONDUCTOR_NASHORN_ES6_ENABLED"))) {
+ NashornScriptEngineFactory factory = new NashornScriptEngineFactory();
+ scriptEngine = factory.getScriptEngine("--language=es6");
+ } else {
+ scriptEngine = new ScriptEngineManager().getEngineByName("Nashorn");
+ }
if (scriptEngine == null) {
LOGGER.error("missing " + ENGINE + " engine. Ensure you are running supported JVM");
return this;
@@ -128,7 +136,13 @@ public Javascript validate() {
*/
public Object test(Map<String, Object> input) {
- ScriptEngine scriptEngine = new ScriptEngineManager().getEngineByName("Nashorn");
+ ScriptEngine scriptEngine;
+ if ("true".equalsIgnoreCase(System.getenv("CONDUCTOR_NASHORN_ES6_ENABLED"))) {
+ NashornScriptEngineFactory factory = new NashornScriptEngineFactory();
+ scriptEngine = factory.getScriptEngine("--language=es6");
+ } else {
+ scriptEngine = new ScriptEngineManager().getEngineByName("Nashorn");
+ }
if (scriptEngine == null) {
LOGGER.error("missing " + ENGINE + " engine. Ensure you are running supported JVM");
return this;
diff --git a/metrics/build.gradle b/metrics/build.gradle
index 25d0a8d92..fdf9fe89e 100644
--- a/metrics/build.gradle
+++ b/metrics/build.gradle
@@ -19,7 +19,7 @@ dependencies {
implementation "io.prometheus:simpleclient:${revPrometheus}"
implementation "io.micrometer:micrometer-registry-prometheus:${revMicrometer}"
- implementation 'io.micrometer:micrometer-registry-datadog:1.12.1'
+ implementation 'io.micrometer:micrometer-registry-datadog:1.13.0'
testImplementation 'org.springframework.boot:spring-boot-starter-web'
diff --git a/mkdocs.yml b/mkdocs.yml
index fe81a3757..fa4794b0f 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -101,6 +101,7 @@ nav:
- documentation/advanced/azureblob-storage.md
- documentation/advanced/externalpayloadstorage.md
- documentation/advanced/redis.md
+ - documentation/advanced/postgresql.md
- Client SDKs:
- documentation/clientsdks/index.md
- documentation/clientsdks/java-sdk.md
diff --git a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JetStreamObservableQueue.java b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JetStreamObservableQueue.java
index 7b54b775b..3083eadaa 100644
--- a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JetStreamObservableQueue.java
+++ b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JetStreamObservableQueue.java
@@ -22,26 +22,20 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
+import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.springframework.boot.availability.AvailabilityChangeEvent;
+import org.springframework.boot.availability.LivenessState;
+import org.springframework.context.ApplicationEventPublisher;
import com.netflix.conductor.contribs.queue.nats.config.JetStreamProperties;
+import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
-import io.nats.client.Connection;
-import io.nats.client.ConnectionListener;
-import io.nats.client.JetStream;
-import io.nats.client.JetStreamApiException;
-import io.nats.client.JetStreamManagement;
-import io.nats.client.JetStreamSubscription;
-import io.nats.client.Nats;
-import io.nats.client.Options;
-import io.nats.client.PushSubscribeOptions;
-import io.nats.client.api.RetentionPolicy;
-import io.nats.client.api.StorageType;
-import io.nats.client.api.StreamConfiguration;
-import io.nats.client.api.StreamInfo;
+import io.nats.client.*;
+import io.nats.client.api.*;
import rx.Observable;
import rx.Scheduler;
@@ -58,31 +52,49 @@ public class JetStreamObservableQueue implements ObservableQueue {
private final JetStreamProperties properties;
private final Scheduler scheduler;
private final AtomicBoolean running = new AtomicBoolean(false);
+ private final ApplicationEventPublisher eventPublisher;
private Connection nc;
private JetStreamSubscription sub;
private Observable<Long> interval;
private final String queueGroup;
public JetStreamObservableQueue(
+ ConductorProperties conductorProperties,
JetStreamProperties properties,
String queueType,
String queueUri,
- Scheduler scheduler) {
+ Scheduler scheduler,
+ ApplicationEventPublisher eventPublisher) {
LOG.debug("JSM obs queue create, qtype={}, quri={}", queueType, queueUri);
this.queueUri = queueUri;
// If queue specified (e.g. subject:queue) - split to subject & queue
if (queueUri.contains(":")) {
- this.subject = queueUri.substring(0, queueUri.indexOf(':'));
+ this.subject =
+ getQueuePrefix(conductorProperties, properties)
+ + queueUri.substring(0, queueUri.indexOf(':'));
queueGroup = queueUri.substring(queueUri.indexOf(':') + 1);
} else {
- this.subject = queueUri;
+ this.subject = getQueuePrefix(conductorProperties, properties) + queueUri;
queueGroup = null;
}
this.queueType = queueType;
this.properties = properties;
this.scheduler = scheduler;
+ this.eventPublisher = eventPublisher;
+ }
+
+ public static String getQueuePrefix(
+ ConductorProperties conductorProperties, JetStreamProperties properties) {
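+ // Fall back to "<appId>_jsm_notify_[<stack>_]" unless an explicit listener queue prefix is configured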
+ String stack = "";
+ if (conductorProperties.getStack() != null && conductorProperties.getStack().length() > 0) {
+ stack = conductorProperties.getStack() + "_";
+ }
+
+ return StringUtils.isBlank(properties.getListenerQueuePrefix())
+ ? conductorProperties.getAppId() + "_jsm_notify_" + stack
+ : properties.getListenerQueuePrefix();
}
@Override
@@ -211,11 +223,19 @@ private void natsConnect() {
.connectionListener(
(conn, type) -> {
LOG.info("Connection to JSM updated: {}", type);
+ if (ConnectionListener.Events.CLOSED.equals(type)) {
+ LOG.error(
+ "Could not reconnect to NATS! Changing liveness status to {}!",
+ LivenessState.BROKEN);
+ AvailabilityChangeEvent.publish(
+ eventPublisher, type, LivenessState.BROKEN);
+ }
this.nc = conn;
subscribeOnce(conn, type);
})
+ .errorListener(new LoggingNatsErrorListener())
.server(properties.getUrl())
- .maxReconnects(-1)
+ .maxReconnects(properties.getMaxReconnects())
.build(),
true);
} catch (InterruptedException e) {
@@ -224,43 +244,71 @@ private void natsConnect() {
}
}
- private void createStream(Connection nc) {
- JetStreamManagement jsm;
- try {
- jsm = nc.jetStreamManagement();
- } catch (IOException e) {
- throw new NatsException("Failed to get jsm management", e);
- }
-
+ private void createStream(JetStreamManagement jsm) {
StreamConfiguration streamConfig =
StreamConfiguration.builder()
.name(subject)
- .retentionPolicy(RetentionPolicy.WorkQueue)
+ .replicas(properties.getReplicas())
+ .retentionPolicy(RetentionPolicy.Limits)
+ .maxBytes(properties.getStreamMaxBytes())
.storageType(StorageType.get(properties.getStreamStorageType()))
.build();
try {
StreamInfo streamInfo = jsm.addStream(streamConfig);
- LOG.debug("Create stream, info: {}", streamInfo);
+ LOG.debug("Updated stream, info: {}", streamInfo);
} catch (IOException | JetStreamApiException e) {
LOG.error("Failed to add stream: " + streamConfig, e);
+ AvailabilityChangeEvent.publish(eventPublisher, e, LivenessState.BROKEN);
}
}
private void subscribeOnce(Connection nc, ConnectionListener.Events type) {
if (type.equals(ConnectionListener.Events.CONNECTED)
|| type.equals(ConnectionListener.Events.RECONNECTED)) {
- createStream(nc);
- subscribe(nc);
+ JetStreamManagement jsm;
+ try {
+ jsm = nc.jetStreamManagement();
+ } catch (IOException e) {
+ throw new NatsException("Failed to get jsm management", e);
+ }
+ createStream(jsm);
+ var consumerConfig = createConsumer(jsm);
+ subscribe(nc, consumerConfig);
}
}
- private void subscribe(Connection nc) {
+ private ConsumerConfiguration createConsumer(JetStreamManagement jsm) {
+ ConsumerConfiguration consumerConfig =
+ ConsumerConfiguration.builder()
+ .name(properties.getDurableName())
+ .deliverGroup(queueGroup)
+ .durable(properties.getDurableName())
+ .ackWait(properties.getAckWait())
+ .maxDeliver(properties.getMaxDeliver())
+ .maxAckPending(properties.getMaxAckPending())
+ .ackPolicy(AckPolicy.Explicit)
+ .deliverSubject(subject + "-deliver")
+ .deliverPolicy(DeliverPolicy.New)
+ .build();
+
+ try {
+ jsm.addOrUpdateConsumer(subject, consumerConfig);
+ return consumerConfig;
+ } catch (IOException | JetStreamApiException e) {
+ throw new NatsException("Failed to add/update consumer", e);
+ }
+ }
+
+ private void subscribe(Connection nc, ConsumerConfiguration consumerConfig) {
try {
JetStream js = nc.jetStream();
PushSubscribeOptions pso =
- PushSubscribeOptions.builder().durable(properties.getDurableName()).build();
+ PushSubscribeOptions.builder().configuration(consumerConfig).stream(subject)
+ .bind(true)
+ .build();
+
LOG.debug("Subscribing jsm, subject={}, options={}", subject, pso);
sub =
js.subscribe(
@@ -270,7 +318,7 @@ private void subscribe(Connection nc) {
msg -> {
var message = new JsmMessage();
message.setJsmMsg(msg);
- message.setId(msg.getSID());
+ message.setId(NUID.nextGlobal());
message.setPayload(new String(msg.getData()));
messages.add(message);
},
@@ -279,7 +327,7 @@ private void subscribe(Connection nc) {
LOG.debug("Subscribed successfully {}", sub.getConsumerInfo());
this.running.set(true);
} catch (IOException | JetStreamApiException e) {
- LOG.error("Failed to subscribe", e);
+ throw new NatsException("Failed to subscribe", e);
}
}
}
diff --git a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/LoggingNatsErrorListener.java b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/LoggingNatsErrorListener.java
new file mode 100644
index 000000000..5f365bd45
--- /dev/null
+++ b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/LoggingNatsErrorListener.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2024 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.contribs.queue.nats;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.nats.client.Connection;
+import io.nats.client.ErrorListener;
+import io.nats.client.JetStreamSubscription;
+import io.nats.client.Message;
+
+public class LoggingNatsErrorListener implements ErrorListener {
+ private static final Logger LOG = LoggerFactory.getLogger(LoggingNatsErrorListener.class);
+
+ @Override
+ public void errorOccurred(Connection conn, String error) {
+ LOG.error("Nats connection error occurred: {}", error);
+ }
+
+ @Override
+ public void exceptionOccurred(Connection conn, Exception exp) {
+ LOG.error("Nats connection exception occurred", exp);
+ }
+
+ @Override
+ public void messageDiscarded(Connection conn, Message msg) {
+ LOG.error("Nats message discarded, SID={}, ", msg.getSID());
+ }
+
+ @Override
+ public void heartbeatAlarm(
+ Connection conn,
+ JetStreamSubscription sub,
+ long lastStreamSequence,
+ long lastConsumerSequence) {
+ LOG.warn("Heartbit missed, subject={}", sub.getSubject());
+ }
+}
diff --git a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamConfiguration.java b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamConfiguration.java
index a0fb07b65..edefbcece 100644
--- a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamConfiguration.java
+++ b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamConfiguration.java
@@ -15,9 +15,9 @@
import java.util.EnumMap;
import java.util.Map;
-import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@@ -37,32 +37,25 @@
public class JetStreamConfiguration {
@Bean
public EventQueueProvider jsmEventQueueProvider(
- JetStreamProperties properties, Scheduler scheduler) {
- return new JetStreamEventQueueProvider(properties, scheduler);
+ JetStreamProperties properties,
+ Scheduler scheduler,
+ ConductorProperties conductorProperties,
+ ApplicationEventPublisher eventPublisher) {
+ return new JetStreamEventQueueProvider(
+ conductorProperties, properties, scheduler, eventPublisher);
}
@ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "jsm")
@Bean
public Map<TaskModel.Status, ObservableQueue> getQueues(
- JetStreamEventQueueProvider provider,
- ConductorProperties conductorProperties,
- JetStreamProperties properties) {
- String stack = "";
- if (conductorProperties.getStack() != null && conductorProperties.getStack().length() > 0) {
- stack = conductorProperties.getStack() + "_";
- }
+ EventQueueProvider jsmEventQueueProvider, JetStreamProperties properties) {
TaskModel.Status[] statuses =
new TaskModel.Status[] {TaskModel.Status.COMPLETED, TaskModel.Status.FAILED};
Map<TaskModel.Status, ObservableQueue> queues = new EnumMap<>(TaskModel.Status.class);
for (TaskModel.Status status : statuses) {
- String queuePrefix =
- StringUtils.isBlank(properties.getListenerQueuePrefix())
- ? conductorProperties.getAppId() + "_jsm_notify_" + stack
- : properties.getListenerQueuePrefix();
-
- String queueName = queuePrefix + status.name() + getQueueGroup(properties);
+ String queueName = status.name() + getQueueGroup(properties);
- ObservableQueue queue = provider.getQueue(queueName);
+ ObservableQueue queue = jsmEventQueueProvider.getQueue(queueName);
queues.put(status, queue);
}
diff --git a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamEventQueueProvider.java b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamEventQueueProvider.java
index 36fb7bf4f..cbe3615ed 100644
--- a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamEventQueueProvider.java
+++ b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamEventQueueProvider.java
@@ -17,9 +17,11 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.springframework.context.ApplicationEventPublisher;
import org.springframework.lang.NonNull;
import com.netflix.conductor.contribs.queue.nats.JetStreamObservableQueue;
+import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
@@ -33,12 +35,20 @@ public class JetStreamEventQueueProvider implements EventQueueProvider {
private static final Logger LOG = LoggerFactory.getLogger(JetStreamEventQueueProvider.class);
private final Map<String, ObservableQueue> queues = new ConcurrentHashMap<>();
private final JetStreamProperties properties;
+ private final ConductorProperties conductorProperties;
private final Scheduler scheduler;
+ private final ApplicationEventPublisher eventPublisher;
- public JetStreamEventQueueProvider(JetStreamProperties properties, Scheduler scheduler) {
+ public JetStreamEventQueueProvider(
+ ConductorProperties conductorProperties,
+ JetStreamProperties properties,
+ Scheduler scheduler,
+ ApplicationEventPublisher eventPublisher) {
LOG.info("NATS Event Queue Provider initialized...");
this.properties = properties;
+ this.conductorProperties = conductorProperties;
this.scheduler = scheduler;
+ this.eventPublisher = eventPublisher;
}
@Override
@@ -49,9 +59,16 @@ public String getQueueType() {
@Override
@NonNull
public ObservableQueue getQueue(String queueURI) throws IllegalArgumentException {
- LOG.debug("Getting obs queue, quri={}", queueURI);
+ LOG.info("Getting obs queue, quri={}", queueURI);
return queues.computeIfAbsent(
queueURI,
- q -> new JetStreamObservableQueue(properties, getQueueType(), queueURI, scheduler));
+ q ->
+ new JetStreamObservableQueue(
+ conductorProperties,
+ properties,
+ getQueueType(),
+ queueURI,
+ scheduler,
+ eventPublisher));
}
}
diff --git a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamProperties.java b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamProperties.java
index 7d8305d06..ebf1001d0 100644
--- a/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamProperties.java
+++ b/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamProperties.java
@@ -29,6 +29,7 @@ public class JetStreamProperties {
private String durableName = "defaultQueue";
private String streamStorageType = "file";
+ private long streamMaxBytes = -1;
/** The NATS connection url */
private String url = Options.DEFAULT_URL;
@@ -38,6 +39,46 @@ public class JetStreamProperties {
/** WAIT tasks default queue group, to make subscription round-robin delivery to single sub */
private String defaultQueueGroup = "wait-group";
+ private int replicas = 3;
+
+ private int maxReconnects = -1;
+
+ private Duration ackWait = Duration.ofSeconds(60);
+ private long maxAckPending = 100;
+ private int maxDeliver = 5;
+
+ public long getStreamMaxBytes() {
+ return streamMaxBytes;
+ }
+
+ public void setStreamMaxBytes(long streamMaxBytes) {
+ this.streamMaxBytes = streamMaxBytes;
+ }
+
+ public Duration getAckWait() {
+ return ackWait;
+ }
+
+ public void setAckWait(Duration ackWait) {
+ this.ackWait = ackWait;
+ }
+
+ public long getMaxAckPending() {
+ return maxAckPending;
+ }
+
+ public void setMaxAckPending(long maxAckPending) {
+ this.maxAckPending = maxAckPending;
+ }
+
+ public int getMaxDeliver() {
+ return maxDeliver;
+ }
+
+ public void setMaxDeliver(int maxDeliver) {
+ this.maxDeliver = maxDeliver;
+ }
+
public Duration getPollTimeDuration() {
return pollTimeDuration;
}
@@ -85,4 +126,20 @@ public String getDefaultQueueGroup() {
public void setDefaultQueueGroup(String defaultQueueGroup) {
this.defaultQueueGroup = defaultQueueGroup;
}
+
+ public int getReplicas() {
+ return replicas;
+ }
+
+ public void setReplicas(int replicas) {
+ this.replicas = replicas;
+ }
+
+ public int getMaxReconnects() {
+ return maxReconnects;
+ }
+
+ public void setMaxReconnects(int maxReconnects) {
+ this.maxReconnects = maxReconnects;
+ }
}
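The new JetStream knobs split into stream sizing (`streamMaxBytes`, `replicas`), connection behaviour (`maxReconnects`), and consumer flow control (`ackWait`, `maxAckPending`, `maxDeliver`). A sketch of the equivalent programmatic configuration; in a running server these values are bound from configuration properties rather than set by hand, and the numbers below are illustrative, not recommendations:

```java
import java.time.Duration;

import com.netflix.conductor.contribs.queue.nats.config.JetStreamProperties;

class JetStreamTuningSketch {
    static JetStreamProperties tunedProperties() {
        JetStreamProperties props = new JetStreamProperties();
        props.setStreamMaxBytes(1_073_741_824L);  // cap stream storage at ~1 GiB; -1 = unlimited
        props.setReplicas(3);                     // replicate stream data across 3 NATS servers
        props.setMaxReconnects(-1);               // -1 = keep reconnecting forever
        props.setAckWait(Duration.ofSeconds(60)); // redeliver messages not acked within 60s
        props.setMaxAckPending(100);              // at most 100 unacked messages in flight
        props.setMaxDeliver(5);                   // stop redelivering after 5 attempts
        return props;
    }
}
```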
diff --git a/postgres-external-storage/build.gradle b/postgres-external-storage/build.gradle
index 25915c091..abd6d26ae 100644
--- a/postgres-external-storage/build.gradle
+++ b/postgres-external-storage/build.gradle
@@ -5,7 +5,7 @@ dependencies {
compileOnly 'org.springframework.boot:spring-boot-starter'
compileOnly 'org.springframework.boot:spring-boot-starter-web'
- implementation 'org.postgresql:postgresql'
+ implementation "org.postgresql:postgresql:${revPostgres}"
implementation 'org.springframework.boot:spring-boot-starter-jdbc'
implementation 'org.flywaydb:flyway-core'
implementation 'org.flywaydb:flyway-database-postgresql'
diff --git a/postgres-persistence/build.gradle b/postgres-persistence/build.gradle
index 29f7a6b24..a9f740b64 100644
--- a/postgres-persistence/build.gradle
+++ b/postgres-persistence/build.gradle
@@ -13,7 +13,7 @@ dependencies {
implementation "com.fasterxml.jackson.core:jackson-core"
implementation "org.apache.commons:commons-lang3"
- implementation "org.postgresql:postgresql:${revPostgresql}"
+ implementation "org.postgresql:postgresql:${revPostgres}"
implementation "org.springframework.boot:spring-boot-starter-jdbc"
implementation "org.flywaydb:flyway-core"
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java
index 62ed27eff..1e00cb067 100644
--- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java
@@ -13,11 +13,13 @@
package com.netflix.conductor.postgres.config;
import java.sql.SQLException;
+import java.util.Map;
import java.util.Optional;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
+import org.flywaydb.core.api.configuration.FluentConfiguration;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
@@ -29,10 +31,7 @@
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
-import com.netflix.conductor.postgres.dao.PostgresExecutionDAO;
-import com.netflix.conductor.postgres.dao.PostgresIndexDAO;
-import com.netflix.conductor.postgres.dao.PostgresMetadataDAO;
-import com.netflix.conductor.postgres.dao.PostgresQueueDAO;
+import com.netflix.conductor.postgres.dao.*;
import com.fasterxml.jackson.databind.ObjectMapper;
import jakarta.annotation.*;
@@ -57,8 +56,16 @@ public PostgresConfiguration(DataSource dataSource, PostgresProperties propertie
@Bean(initMethod = "migrate")
@PostConstruct
public Flyway flywayForPrimaryDb() {
- return Flyway.configure()
- .locations("classpath:db/migration_postgres")
+ FluentConfiguration config = Flyway.configure();
+
+ if (properties.getExperimentalQueueNotify()) {
+ config.locations(
+ "classpath:db/migration_postgres", "classpath:db/migration_postgres_notify");
+ } else {
+ config.locations("classpath:db/migration_postgres");
+ }
+
+ return config.configuration(Map.of("flyway.postgresql.transactional.lock", "false"))
.schemas(properties.getSchema())
.dataSource(dataSource)
.outOfOrder(true)
@@ -83,21 +90,43 @@ public PostgresExecutionDAO postgresExecutionDAO(
return new PostgresExecutionDAO(retryTemplate, objectMapper, dataSource);
}
+ @Bean
+ @DependsOn({"flywayForPrimaryDb"})
+ public PostgresPollDataDAO postgresPollDataDAO(
+ @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
+ ObjectMapper objectMapper,
+ PostgresProperties properties) {
+ return new PostgresPollDataDAO(retryTemplate, objectMapper, dataSource, properties);
+ }
+
@Bean
@DependsOn({"flywayForPrimaryDb"})
public PostgresQueueDAO postgresQueueDAO(
@Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
- ObjectMapper objectMapper) {
- return new PostgresQueueDAO(retryTemplate, objectMapper, dataSource);
+ ObjectMapper objectMapper,
+ PostgresProperties properties) {
+ return new PostgresQueueDAO(retryTemplate, objectMapper, dataSource, properties);
}
@Bean
@DependsOn({"flywayForPrimaryDb"})
@ConditionalOnProperty(name = "conductor.indexing.type", havingValue = "postgres")
public PostgresIndexDAO postgresIndexDAO(
+ @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
+ ObjectMapper objectMapper,
+ PostgresProperties properties) {
+ return new PostgresIndexDAO(retryTemplate, objectMapper, dataSource, properties);
+ }
+
+ @Bean
+ @DependsOn({"flywayForPrimaryDb"})
+ @ConditionalOnProperty(
+ name = "conductor.workflow-execution-lock.type",
+ havingValue = "postgres")
+ public PostgresLockDAO postgresLockDAO(
@Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
ObjectMapper objectMapper) {
- return new PostgresIndexDAO(retryTemplate, objectMapper, dataSource);
+ return new PostgresLockDAO(retryTemplate, objectMapper, dataSource);
}
@Bean
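Two details of the Flyway change above are easy to miss: the notify migrations live in a separate location that is only layered in when `experimentalQueueNotify` is set, and the PostgreSQL transactional lock is disabled, presumably because the V9 migration below uses `CREATE INDEX CONCURRENTLY`, which cannot run inside a transaction. A condensed sketch of that configuration, assuming only a `DataSource` and the feature flag:

```java
import java.util.Map;

import javax.sql.DataSource;

import org.flywaydb.core.Flyway;
import org.flywaydb.core.api.configuration.FluentConfiguration;

class FlywaySetupSketch {
    static Flyway primaryDbFlyway(DataSource dataSource, boolean queueNotifyEnabled) {
        FluentConfiguration config = Flyway.configure();
        if (queueNotifyEnabled) {
            // The extra location adds the LISTEN/NOTIFY functions and triggers.
            config.locations(
                    "classpath:db/migration_postgres", "classpath:db/migration_postgres_notify");
        } else {
            config.locations("classpath:db/migration_postgres");
        }
        // Required for migrations that use CREATE INDEX CONCURRENTLY.
        return config.configuration(Map.of("flyway.postgresql.transactional.lock", "false"))
                .dataSource(dataSource)
                .outOfOrder(true)
                .load();
    }
}
```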
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java
index 5c392cb52..0ddf80098 100644
--- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java
@@ -27,8 +27,46 @@ public class PostgresProperties {
private Integer deadlockRetryMax = 3;
+ @DurationUnit(ChronoUnit.MILLIS)
+ private Duration pollDataFlushInterval = Duration.ofMillis(0);
+
+ @DurationUnit(ChronoUnit.MILLIS)
+ private Duration pollDataCacheValidityPeriod = Duration.ofMillis(0);
+
+ private boolean experimentalQueueNotify = false;
+
+ private Integer experimentalQueueNotifyStalePeriod = 5000;
+
+ private boolean onlyIndexOnStatusChange = false;
+
public String schema = "public";
+ public boolean allowFullTextQueries = true;
+
+ public boolean allowJsonQueries = true;
+
+ /** The maximum number of threads allowed in the async pool */
+ private int asyncMaxPoolSize = 12;
+
+ /** The size of the queue used for holding async indexing tasks */
+ private int asyncWorkerQueueSize = 100;
+
+ public boolean getExperimentalQueueNotify() {
+ return experimentalQueueNotify;
+ }
+
+ public void setExperimentalQueueNotify(boolean experimentalQueueNotify) {
+ this.experimentalQueueNotify = experimentalQueueNotify;
+ }
+
+ public Integer getExperimentalQueueNotifyStalePeriod() {
+ return experimentalQueueNotifyStalePeriod;
+ }
+
+ public void setExperimentalQueueNotifyStalePeriod(Integer experimentalQueueNotifyStalePeriod) {
+ this.experimentalQueueNotifyStalePeriod = experimentalQueueNotifyStalePeriod;
+ }
+
public Duration getTaskDefCacheRefreshInterval() {
return taskDefCacheRefreshInterval;
}
@@ -37,6 +75,14 @@ public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval)
this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
}
+ public boolean getOnlyIndexOnStatusChange() {
+ return onlyIndexOnStatusChange;
+ }
+
+ public void setOnlyIndexOnStatusChange(boolean onlyIndexOnStatusChange) {
+ this.onlyIndexOnStatusChange = onlyIndexOnStatusChange;
+ }
+
public Integer getDeadlockRetryMax() {
return deadlockRetryMax;
}
@@ -52,4 +98,52 @@ public String getSchema() {
public void setSchema(String schema) {
this.schema = schema;
}
+
+ public boolean getAllowFullTextQueries() {
+ return allowFullTextQueries;
+ }
+
+ public void setAllowFullTextQueries(boolean allowFullTextQueries) {
+ this.allowFullTextQueries = allowFullTextQueries;
+ }
+
+ public boolean getAllowJsonQueries() {
+ return allowJsonQueries;
+ }
+
+ public void setAllowJsonQueries(boolean allowJsonQueries) {
+ this.allowJsonQueries = allowJsonQueries;
+ }
+
+ public int getAsyncWorkerQueueSize() {
+ return asyncWorkerQueueSize;
+ }
+
+ public void setAsyncWorkerQueueSize(int asyncWorkerQueueSize) {
+ this.asyncWorkerQueueSize = asyncWorkerQueueSize;
+ }
+
+ public int getAsyncMaxPoolSize() {
+ return asyncMaxPoolSize;
+ }
+
+ public void setAsyncMaxPoolSize(int asyncMaxPoolSize) {
+ this.asyncMaxPoolSize = asyncMaxPoolSize;
+ }
+
+ public Duration getPollDataFlushInterval() {
+ return pollDataFlushInterval;
+ }
+
+ public void setPollDataFlushInterval(Duration interval) {
+ this.pollDataFlushInterval = interval;
+ }
+
+ public Duration getPollDataCacheValidityPeriod() {
+ return pollDataCacheValidityPeriod;
+ }
+
+ public void setPollDataCacheValidityPeriod(Duration period) {
+ this.pollDataCacheValidityPeriod = period;
+ }
}
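Taken together, the new properties gate four independent features: LISTEN/NOTIFY queue polling, status-change-only indexing, query-type restrictions, and the poll-data caches. A sketch of the programmatic equivalents; real deployments bind these from `conductor.postgres.*` configuration keys, and the values shown are examples only:

```java
import java.time.Duration;

import com.netflix.conductor.postgres.config.PostgresProperties;

class PostgresTuningSketch {
    static PostgresProperties tuned() {
        PostgresProperties props = new PostgresProperties();
        props.setExperimentalQueueNotify(true);            // queue stats via LISTEN/NOTIFY
        props.setExperimentalQueueNotifyStalePeriod(5000); // distrust notifications older than 5s
        props.setOnlyIndexOnStatusChange(true);            // skip index writes when status is unchanged
        props.setAllowFullTextQueries(false);              // reject free-text tsvector searches
        props.setAllowJsonQueries(true);                   // keep JSON containment (@>) searches
        props.setPollDataFlushInterval(Duration.ofSeconds(5));       // enable the write-behind cache
        props.setPollDataCacheValidityPeriod(Duration.ofSeconds(5)); // enable the read cache
        props.setAsyncMaxPoolSize(12);                     // async indexing worker threads
        props.setAsyncWorkerQueueSize(100);                // pending async work before discarding
        return props;
    }
}
```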
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java
index cf6afae4e..aa5233f6b 100644
--- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java
@@ -14,7 +14,6 @@
import java.sql.Connection;
import java.sql.Date;
-import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.Executors;
@@ -27,12 +26,10 @@
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
-import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
-import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.dao.RateLimitingDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
@@ -47,7 +44,7 @@
import jakarta.annotation.*;
public class PostgresExecutionDAO extends PostgresBaseDAO
- implements ExecutionDAO, RateLimitingDAO, PollDataDAO, ConcurrentExecutionLimitDAO {
+ implements ExecutionDAO, RateLimitingDAO, ConcurrentExecutionLimitDAO {
private final ScheduledExecutorService scheduledExecutorService;
@@ -139,9 +136,10 @@ private static String taskKey(TaskModel task) {
public List<TaskModel> createTasks(List<TaskModel> tasks) {
List<TaskModel> created = Lists.newArrayListWithCapacity(tasks.size());
- for (TaskModel task : tasks) {
- withTransaction(
- connection -> {
+ withTransaction(
+ connection -> {
+ for (TaskModel task : tasks) {
+
validate(task);
task.setScheduledTime(System.currentTimeMillis());
@@ -158,7 +156,7 @@ public List createTasks(List tasks) {
+ task.getReferenceTaskName()
+ ", key="
+ taskKey);
- return;
+ continue;
}
insertOrUpdateTaskData(connection, task);
@@ -167,8 +165,8 @@ public List createTasks(List tasks) {
updateTask(connection, task);
created.add(task);
- });
- }
+ }
+ });
return created;
}
@@ -558,45 +556,6 @@ public List<EventExecution> getEventExecutions(
}
}
- @Override
- public void updateLastPollData(String taskDefName, String domain, String workerId) {
- Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
- PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
- String effectiveDomain = (domain == null) ? "DEFAULT" : domain;
- withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain));
- }
-
- @Override
- public PollData getPollData(String taskDefName, String domain) {
- Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
- String effectiveDomain = (domain == null) ? "DEFAULT" : domain;
- return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain));
- }
-
- @Override
- public List<PollData> getPollData(String taskDefName) {
- Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
- return readAllPollData(taskDefName);
- }
-
- @Override
- public List<PollData> getAllPollData() {
- try (Connection tx = dataSource.getConnection()) {
- boolean previousAutoCommitMode = tx.getAutoCommit();
- tx.setAutoCommit(true);
- try {
- String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name";
- return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class));
- } catch (Throwable th) {
- throw new NonTransientException(th.getMessage(), th);
- } finally {
- tx.setAutoCommit(previousAutoCommitMode);
- }
- } catch (SQLException ex) {
- throw new NonTransientException(ex.getMessage(), ex);
- }
- }
-
private List<TaskModel> getTasks(Connection connection, List<String> taskIds) {
if (taskIds.isEmpty()) {
return Lists.newArrayList();
@@ -1027,56 +986,6 @@ private EventExecution readEventExecution(
.executeAndFetchFirst(EventExecution.class));
}
- private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) {
- /*
- * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that
- * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens. Since polling happens *a lot*, the sequence can increase
- * dramatically even though it won't be used.
- */
- String UPDATE_POLL_DATA =
- "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? AND domain=?";
- int rowsUpdated =
- query(
- connection,
- UPDATE_POLL_DATA,
- q ->
- q.addJsonParameter(pollData)
- .addParameter(pollData.getQueueName())
- .addParameter(domain)
- .executeUpdate());
-
- if (rowsUpdated == 0) {
- String INSERT_POLL_DATA =
- "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON CONFLICT (queue_name,domain) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
- execute(
- connection,
- INSERT_POLL_DATA,
- q ->
- q.addParameter(pollData.getQueueName())
- .addParameter(domain)
- .addJsonParameter(pollData)
- .executeUpdate());
- }
- }
-
- private PollData readPollData(Connection connection, String queueName, String domain) {
- String GET_POLL_DATA =
- "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?";
- return query(
- connection,
- GET_POLL_DATA,
- q ->
- q.addParameter(queueName)
- .addParameter(domain)
- .executeAndFetchFirst(PollData.class));
- }
-
- private List<PollData> readAllPollData(String queueName) {
- String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?";
- return queryWithTransaction(
- GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class));
- }
-
private List<String> findAllTasksInProgressInOrderOfArrival(TaskModel task, int limit) {
String GET_IN_PROGRESS_TASKS_WITH_LIMIT =
"SELECT task_id FROM task_in_progress WHERE task_def_name = ? ORDER BY created_on LIMIT ?";
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresIndexDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresIndexDAO.java
index cbd36da28..6d80818d5 100644
--- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresIndexDAO.java
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresIndexDAO.java
@@ -18,6 +18,10 @@
import java.time.temporal.TemporalAccessor;
import java.util.*;
import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
@@ -30,15 +34,49 @@
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.IndexDAO;
+import com.netflix.conductor.metrics.Monitors;
+import com.netflix.conductor.postgres.config.PostgresProperties;
import com.netflix.conductor.postgres.util.PostgresIndexQueryBuilder;
import com.fasterxml.jackson.databind.ObjectMapper;
public class PostgresIndexDAO extends PostgresBaseDAO implements IndexDAO {
+ private final PostgresProperties properties;
+ private final ExecutorService executorService;
+
+ private static final int CORE_POOL_SIZE = 6;
+ private static final long KEEP_ALIVE_TIME = 1L;
+
+ private boolean onlyIndexOnStatusChange;
+
public PostgresIndexDAO(
- RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
+ RetryTemplate retryTemplate,
+ ObjectMapper objectMapper,
+ DataSource dataSource,
+ PostgresProperties properties) {
super(retryTemplate, objectMapper, dataSource);
+ this.properties = properties;
+ this.onlyIndexOnStatusChange = properties.getOnlyIndexOnStatusChange();
+
+ int maximumPoolSize = properties.getAsyncMaxPoolSize();
+ int workerQueueSize = properties.getAsyncWorkerQueueSize();
+
+ // Set up a worker pool for performing async operations.
+ this.executorService =
+ new ThreadPoolExecutor(
+ CORE_POOL_SIZE,
+ maximumPoolSize,
+ KEEP_ALIVE_TIME,
+ TimeUnit.MINUTES,
+ new LinkedBlockingQueue<>(workerQueueSize),
+ (runnable, executor) -> {
+ logger.warn(
+ "Request {} to async dao discarded in executor {}",
+ runnable,
+ executor);
+ Monitors.recordDiscardedIndexingCount("indexQueue");
+ });
}
@Override
@@ -49,19 +87,25 @@ public void indexWorkflow(WorkflowSummary workflow) {
+ "DO UPDATE SET correlation_id = EXCLUDED.correlation_id, workflow_type = EXCLUDED.workflow_type, "
+ "start_time = EXCLUDED.start_time, status = EXCLUDED.status, json_data = EXCLUDED.json_data";
+ if (onlyIndexOnStatusChange) {
+ INSERT_WORKFLOW_INDEX_SQL += " WHERE workflow_index.status != EXCLUDED.status";
+ }
+
TemporalAccessor ta = DateTimeFormatter.ISO_INSTANT.parse(workflow.getStartTime());
Timestamp startTime = Timestamp.from(Instant.from(ta));
- queryWithTransaction(
- INSERT_WORKFLOW_INDEX_SQL,
- q ->
- q.addParameter(workflow.getWorkflowId())
- .addParameter(workflow.getCorrelationId())
- .addParameter(workflow.getWorkflowType())
- .addParameter(startTime)
- .addParameter(workflow.getStatus().toString())
- .addJsonParameter(workflow)
- .executeUpdate());
+ int rowsUpdated =
+ queryWithTransaction(
+ INSERT_WORKFLOW_INDEX_SQL,
+ q ->
+ q.addParameter(workflow.getWorkflowId())
+ .addParameter(workflow.getCorrelationId())
+ .addParameter(workflow.getWorkflowType())
+ .addParameter(startTime)
+ .addParameter(workflow.getStatus().toString())
+ .addJsonParameter(workflow)
+ .executeUpdate());
+ logger.debug("Postgres index workflow rows updated: {}", rowsUpdated);
}
@Override
@@ -69,7 +113,7 @@ public SearchResult<WorkflowSummary> searchWorkflowSummary(
String query, String freeText, int start, int count, List<String> sort) {
PostgresIndexQueryBuilder queryBuilder =
new PostgresIndexQueryBuilder(
- "workflow_index", query, freeText, start, count, sort);
+ "workflow_index", query, freeText, start, count, sort, properties);
List<WorkflowSummary> results =
queryWithTransaction(
@@ -93,31 +137,38 @@ public void indexTask(TaskSummary task) {
+ "DO UPDATE SET task_type = EXCLUDED.task_type, task_def_name = EXCLUDED.task_def_name, "
+ "status = EXCLUDED.status, update_time = EXCLUDED.update_time, json_data = EXCLUDED.json_data";
+ if (onlyIndexOnStatusChange) {
+ INSERT_TASK_INDEX_SQL += " WHERE task_index.status != EXCLUDED.status";
+ }
+
TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(task.getUpdateTime());
Timestamp updateTime = Timestamp.from(Instant.from(updateTa));
TemporalAccessor startTa = DateTimeFormatter.ISO_INSTANT.parse(task.getStartTime());
Timestamp startTime = Timestamp.from(Instant.from(startTa));
- queryWithTransaction(
- INSERT_TASK_INDEX_SQL,
- q ->
- q.addParameter(task.getTaskId())
- .addParameter(task.getTaskType())
- .addParameter(task.getTaskDefName())
- .addParameter(task.getStatus().toString())
- .addParameter(startTime)
- .addParameter(updateTime)
- .addParameter(task.getWorkflowType())
- .addJsonParameter(task)
- .executeUpdate());
+ int rowsUpdated =
+ queryWithTransaction(
+ INSERT_TASK_INDEX_SQL,
+ q ->
+ q.addParameter(task.getTaskId())
+ .addParameter(task.getTaskType())
+ .addParameter(task.getTaskDefName())
+ .addParameter(task.getStatus().toString())
+ .addParameter(startTime)
+ .addParameter(updateTime)
+ .addParameter(task.getWorkflowType())
+ .addJsonParameter(task)
+ .executeUpdate());
+ logger.debug("Postgres index task rows updated: {}", rowsUpdated);
}
@Override
public SearchResult<TaskSummary> searchTaskSummary(
String query, String freeText, int start, int count, List<String> sort) {
PostgresIndexQueryBuilder queryBuilder =
- new PostgresIndexQueryBuilder("task_index", query, freeText, start, count, sort);
+ new PostgresIndexQueryBuilder(
+ "task_index", query, freeText, start, count, sort, properties);
List<TaskSummary> results =
queryWithTransaction(
@@ -200,13 +251,14 @@ public SearchResult<String> searchTasks(
@Override
public void removeWorkflow(String workflowId) {
- logger.info("removeWorkflow is not supported for postgres indexing");
+ String REMOVE_WORKFLOW_SQL = "DELETE FROM workflow_index WHERE workflow_id = ?";
+
+ queryWithTransaction(REMOVE_WORKFLOW_SQL, q -> q.addParameter(workflowId).executeUpdate());
}
@Override
public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) {
- logger.info("asyncRemoveWorkflow is not supported for postgres indexing");
- return CompletableFuture.completedFuture(null);
+ return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService);
}
@Override
@@ -223,13 +275,17 @@ public CompletableFuture<Void> asyncUpdateWorkflow(
@Override
public void removeTask(String workflowId, String taskId) {
- logger.info("removeTask is not supported for postgres indexing");
+ String REMOVE_TASK_SQL =
+ "WITH task_delete AS (DELETE FROM task_index WHERE task_id = ?)"
+ + "DELETE FROM task_execution_logs WHERE task_id =?";
+
+ queryWithTransaction(
+ REMOVE_TASK_SQL, q -> q.addParameter(taskId).addParameter(taskId).executeUpdate());
}
@Override
public CompletableFuture<Void> asyncRemoveTask(String workflowId, String taskId) {
- logger.info("asyncRemoveTask is not supported for postgres indexing");
- return CompletableFuture.completedFuture(null);
+ return CompletableFuture.runAsync(() -> removeTask(workflowId, taskId), executorService);
}
@Override
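The executor wired into the index DAO above is deliberately lossy: when the bounded queue fills, new indexing work is logged and dropped rather than blocking callers. A generic sketch of that construction, detached from the DAO:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class BoundedDiscardingPoolSketch {
    static ExecutorService newPool(int core, int max, int queueSize) {
        return new ThreadPoolExecutor(
                core,
                max,
                1L,                                   // idle non-core threads die after...
                TimeUnit.MINUTES,                     // ...one minute
                new LinkedBlockingQueue<>(queueSize), // bounded backlog
                (runnable, executor) ->
                        // Indexing is best-effort: dropping overflow work keeps
                        // back-pressure away from workflow execution.
                        System.err.println("async task discarded: " + runnable));
    }
}
```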
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresLockDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresLockDAO.java
new file mode 100644
index 000000000..072ec1524
--- /dev/null
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresLockDAO.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2024 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.postgres.dao;
+
+import java.util.concurrent.TimeUnit;
+
+import javax.sql.DataSource;
+
+import org.springframework.retry.support.RetryTemplate;
+
+import com.netflix.conductor.core.sync.Lock;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public class PostgresLockDAO extends PostgresBaseDAO implements Lock {
+ private final long DAY_MS = 24 * 60 * 60 * 1000;
+
+ public PostgresLockDAO(
+ RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
+ super(retryTemplate, objectMapper, dataSource);
+ }
+
+ @Override
+ public void acquireLock(String lockId) {
+ acquireLock(lockId, DAY_MS, DAY_MS, TimeUnit.MILLISECONDS);
+ }
+
+ @Override
+ public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) {
+ return acquireLock(lockId, timeToTry, DAY_MS, unit);
+ }
+
+ @Override
+ public boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit) {
+ long endTime = System.currentTimeMillis() + unit.toMillis(timeToTry);
+ while (System.currentTimeMillis() < endTime) {
+ var sql =
+ "INSERT INTO locks(lock_id, lease_expiration) VALUES (?, now() + (?::text || ' milliseconds')::interval) ON CONFLICT (lock_id) DO UPDATE SET lease_expiration = EXCLUDED.lease_expiration WHERE locks.lease_expiration <= now()";
+
+ int rowsAffected =
+ queryWithTransaction(
+ sql,
+ q ->
+ q.addParameter(lockId)
+ .addParameter(unit.toMillis(leaseTime))
+ .executeUpdate());
+
+ if (rowsAffected > 0) {
+ return true;
+ }
+
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException ie) {
+ Thread.currentThread().interrupt();
+ return false;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public void releaseLock(String lockId) {
+ var sql = "DELETE FROM locks WHERE lock_id = ?";
+ queryWithTransaction(sql, q -> q.addParameter(lockId).executeDelete());
+ }
+
+ @Override
+ public void deleteLock(String lockId) {
+ releaseLock(lockId);
+ }
+}
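The lock is lease-based: the `INSERT ... ON CONFLICT` either creates the row in `locks` or takes over a lease that has already expired, so a crashed holder can block others for at most the lease time. A caller-side usage sketch against the `Lock` interface implemented above:

```java
import java.util.concurrent.TimeUnit;

import com.netflix.conductor.core.sync.Lock;

class LockUsageSketch {
    // Try for up to 2 seconds to acquire a 60-second lease; the lease expires
    // server-side even if this JVM dies before releaseLock runs.
    static void withLock(Lock lock, String lockId, Runnable critical) {
        if (!lock.acquireLock(lockId, 2_000, 60_000, TimeUnit.MILLISECONDS)) {
            return; // someone else holds a live lease
        }
        try {
            critical.run();
        } finally {
            lock.releaseLock(lockId);
        }
    }
}
```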
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAO.java
new file mode 100644
index 000000000..d79bdc5da
--- /dev/null
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAO.java
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2024 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.postgres.dao;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import javax.sql.DataSource;
+
+import org.springframework.retry.support.RetryTemplate;
+
+import com.netflix.conductor.common.metadata.tasks.PollData;
+import com.netflix.conductor.core.exception.NonTransientException;
+import com.netflix.conductor.dao.PollDataDAO;
+import com.netflix.conductor.postgres.config.PostgresProperties;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Preconditions;
+import jakarta.annotation.PostConstruct;
+
+public class PostgresPollDataDAO extends PostgresBaseDAO implements PollDataDAO {
+
+ private ConcurrentHashMap<String, ConcurrentHashMap<String, PollData>> pollDataCache =
+ new ConcurrentHashMap<>();
+
+ private long pollDataFlushInterval;
+
+ private long cacheValidityPeriod;
+
+ private long lastFlushTime = 0;
+
+ private boolean useReadCache;
+
+ public PostgresPollDataDAO(
+ RetryTemplate retryTemplate,
+ ObjectMapper objectMapper,
+ DataSource dataSource,
+ PostgresProperties properties) {
+ super(retryTemplate, objectMapper, dataSource);
+ this.pollDataFlushInterval = properties.getPollDataFlushInterval().toMillis();
+ if (this.pollDataFlushInterval > 0) {
+ logger.info("Using Postgres pollData write cache");
+ }
+ this.cacheValidityPeriod = properties.getPollDataCacheValidityPeriod().toMillis();
+ this.useReadCache = cacheValidityPeriod > 0;
+ if (this.useReadCache) {
+ logger.info("Using Postgres pollData read cache");
+ }
+ }
+
+ @PostConstruct
+ public void schedulePollDataRefresh() {
+ if (pollDataFlushInterval > 0) {
+ Executors.newSingleThreadScheduledExecutor()
+ .scheduleWithFixedDelay(
+ this::flushData,
+ pollDataFlushInterval,
+ pollDataFlushInterval,
+ TimeUnit.MILLISECONDS);
+ }
+ }
+
+ @Override
+ public void updateLastPollData(String taskDefName, String domain, String workerId) {
+ Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
+
+ String effectiveDomain = domain == null ? "DEFAULT" : domain;
+ PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
+
+ if (pollDataFlushInterval > 0) {
+ pollDataCache
+ .computeIfAbsent(taskDefName, k -> new ConcurrentHashMap<>())
+ .put(effectiveDomain, pollData);
+ } else {
+ withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain));
+ }
+ }
+
+ @Override
+ public PollData getPollData(String taskDefName, String domain) {
+ PollData result;
+
+ if (useReadCache) {
+ ConcurrentHashMap<String, PollData> domainPollData = pollDataCache.get(taskDefName);
+ if (domainPollData == null) {
+ return null;
+ }
+ result = domainPollData.get(domain == null ? "DEFAULT" : domain);
+ if (result != null) {
+ long cacheAgeMillis = System.currentTimeMillis() - result.getLastPollTime();
+ if (cacheAgeMillis < cacheValidityPeriod) {
+ return result;
+ }
+ }
+ }
+
+ Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
+ String effectiveDomain = (domain == null) ? "DEFAULT" : domain;
+ return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain));
+ }
+
+ @Override
+ public List<PollData> getPollData(String taskDefName) {
+ Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
+ return readAllPollData(taskDefName);
+ }
+
+ @Override
+ public List<PollData> getAllPollData() {
+ try (Connection tx = dataSource.getConnection()) {
+ boolean previousAutoCommitMode = tx.getAutoCommit();
+ tx.setAutoCommit(true);
+ try {
+ String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name";
+ return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class));
+ } catch (Throwable th) {
+ throw new NonTransientException(th.getMessage(), th);
+ } finally {
+ tx.setAutoCommit(previousAutoCommitMode);
+ }
+ } catch (SQLException ex) {
+ throw new NonTransientException(ex.getMessage(), ex);
+ }
+ }
+
+ public long getLastFlushTime() {
+ return lastFlushTime;
+ }
+
+ private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) {
+ try {
+ /*
+ * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that
+ * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens. Since polling happens *a lot*, the sequence can increase
+ * dramatically even though it won't be used.
+ */
+ String UPDATE_POLL_DATA =
+ "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? AND domain=?";
+ int rowsUpdated =
+ query(
+ connection,
+ UPDATE_POLL_DATA,
+ q ->
+ q.addJsonParameter(pollData)
+ .addParameter(pollData.getQueueName())
+ .addParameter(domain)
+ .executeUpdate());
+
+ if (rowsUpdated == 0) {
+ String INSERT_POLL_DATA =
+ "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON CONFLICT (queue_name,domain) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
+ execute(
+ connection,
+ INSERT_POLL_DATA,
+ q ->
+ q.addParameter(pollData.getQueueName())
+ .addParameter(domain)
+ .addJsonParameter(pollData)
+ .executeUpdate());
+ }
+ } catch (NonTransientException e) {
+ if (!e.getMessage().startsWith("ERROR: lastPollTime cannot be set to a lower value")) {
+ throw e;
+ }
+ }
+ }
+
+ private PollData readPollData(Connection connection, String queueName, String domain) {
+ String GET_POLL_DATA =
+ "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?";
+ return query(
+ connection,
+ GET_POLL_DATA,
+ q ->
+ q.addParameter(queueName)
+ .addParameter(domain)
+ .executeAndFetchFirst(PollData.class));
+ }
+
+ private List<PollData> readAllPollData(String queueName) {
+ String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?";
+ return queryWithTransaction(
+ GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class));
+ }
+
+ private void flushData() {
+ try {
+ for (Map.Entry<String, ConcurrentHashMap<String, PollData>> queue :
+ pollDataCache.entrySet()) {
+ for (Map.Entry<String, PollData> domain : queue.getValue().entrySet()) {
+ withTransaction(
+ tx -> {
+ insertOrUpdatePollData(tx, domain.getValue(), domain.getKey());
+ });
+ }
+ }
+ lastFlushTime = System.currentTimeMillis();
+ } catch (Exception e) {
+ logger.error("Postgres pollData cache flush failed ", e);
+ }
+ }
+}
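The poll-data DAO trades durability for write volume: updates land in a two-level in-memory map keyed by task definition and domain, and a single scheduled thread flushes the latest entry per key. Losing an intermediate update is acceptable because only the most recent poll time matters. The general shape of that write-behind cache, as a standalone sketch:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;

class WriteBehindCacheSketch<V> {
    private final Map<String, Map<String, V>> cache = new ConcurrentHashMap<>();

    // flush receives (domain, value); in the real DAO the queue name is
    // recovered from the PollData value itself.
    WriteBehindCacheSketch(long flushIntervalMillis, BiConsumer<String, V> flush) {
        Executors.newSingleThreadScheduledExecutor()
                .scheduleWithFixedDelay(
                        () -> cache.forEach((key, byDomain) -> byDomain.forEach(flush)),
                        flushIntervalMillis,
                        flushIntervalMillis,
                        TimeUnit.MILLISECONDS);
    }

    void put(String key, String domain, V value) {
        // Later writes simply overwrite earlier ones between flushes.
        cache.computeIfAbsent(key, k -> new ConcurrentHashMap<>()).put(domain, value);
    }
}
```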
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java
index 71af60838..ab38281aa 100644
--- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java
@@ -25,7 +25,9 @@
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
+import com.netflix.conductor.postgres.config.PostgresProperties;
import com.netflix.conductor.postgres.util.ExecutorsUtil;
+import com.netflix.conductor.postgres.util.PostgresQueueListener;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -40,8 +42,13 @@ public class PostgresQueueDAO extends PostgresBaseDAO implements QueueDAO {
private final ScheduledExecutorService scheduledExecutorService;
+ private PostgresQueueListener queueListener;
+
public PostgresQueueDAO(
- RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
+ RetryTemplate retryTemplate,
+ ObjectMapper objectMapper,
+ DataSource dataSource,
+ PostgresProperties properties) {
super(retryTemplate, objectMapper, dataSource);
this.scheduledExecutorService =
@@ -53,6 +60,10 @@ public PostgresQueueDAO(
UNACK_SCHEDULE_MS,
TimeUnit.MILLISECONDS);
logger.debug("{} is ready to serve", PostgresQueueDAO.class.getName());
+
+ if (properties.getExperimentalQueueNotify()) {
+ this.queueListener = new PostgresQueueListener(dataSource, properties);
+ }
}
@PreDestroy
@@ -169,6 +180,13 @@ public void remove(String queueName, String messageId) {
@Override
public int getSize(String queueName) {
+ if (queueListener != null) {
+ Optional<Integer> size = queueListener.getSize(queueName);
+ if (size.isPresent()) {
+ return size.get();
+ }
+ }
+
final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?";
return queryWithTransaction(
GET_QUEUE_SIZE, q -> ((Long) q.addParameter(queueName).executeCount()).intValue());
@@ -425,6 +443,12 @@ private boolean removeMessage(Connection connection, String queueName, String me
private List<Message> popMessages(
Connection connection, String queueName, int count, int timeout) {
+ if (this.queueListener != null) {
+ if (!this.queueListener.hasMessagesReady(queueName)) {
+ return new ArrayList<>();
+ }
+ }
+
String POP_QUERY =
"UPDATE queue_message SET popped = true WHERE message_id IN ("
+ "SELECT message_id FROM queue_message WHERE queue_name = ? AND popped = false AND "
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilder.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilder.java
index 141df11b2..9e73df686 100644
--- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilder.java
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilder.java
@@ -24,6 +24,8 @@
import org.apache.commons.lang3.StringUtils;
+import com.netflix.conductor.postgres.config.PostgresProperties;
+
public class PostgresIndexQueryBuilder {
private final String table;
@@ -33,6 +35,10 @@ public class PostgresIndexQueryBuilder {
private final List<String> sort;
private final List conditions = new ArrayList<>();
+ private boolean allowJsonQueries;
+
+ private boolean allowFullTextQueries;
+
private static final String[] VALID_FIELDS = {
"workflow_id",
"correlation_id",
@@ -44,7 +50,7 @@ public class PostgresIndexQueryBuilder {
"task_def_name",
"update_time",
"json_data",
- "to_tsvector(json_data::text)"
+ "jsonb_to_tsvector('english', json_data, '[\"all\"]')"
};
private static final String[] VALID_SORT_ORDER = {"ASC", "DESC"};
@@ -69,6 +75,8 @@ public Condition(String query) {
if (this.attribute.endsWith("_time")) {
values.set(0, millisToUtc(values.get(0)));
}
+ } else {
+ throw new IllegalArgumentException("Incorrectly formatted query string: " + query);
}
}
@@ -128,12 +136,20 @@ public void setValues(List<String> values) {
}
public PostgresIndexQueryBuilder(
- String table, String query, String freeText, int start, int count, List<String> sort) {
+ String table,
+ String query,
+ String freeText,
+ int start,
+ int count,
+ List<String> sort,
+ PostgresProperties properties) {
this.table = table;
this.freeText = freeText;
this.start = start;
this.count = count;
this.sort = sort;
+ this.allowFullTextQueries = properties.getAllowFullTextQueries();
+ this.allowJsonQueries = properties.getAllowJsonQueries();
this.parseQuery(query);
this.parseFreeText(freeText);
}
@@ -177,16 +193,16 @@ private void parseQuery(String query) {
private void parseFreeText(String freeText) {
if (!StringUtils.isEmpty(freeText) && !freeText.equals("*")) {
- if (freeText.startsWith("{") && freeText.endsWith("}")) {
+ if (allowJsonQueries && freeText.startsWith("{") && freeText.endsWith("}")) {
Condition cond = new Condition();
cond.setAttribute("json_data");
cond.setOperator("@>");
String[] values = {freeText};
cond.setValues(Arrays.asList(values));
conditions.add(cond);
- } else {
+ } else if (allowFullTextQueries) {
Condition cond = new Condition();
- cond.setAttribute("to_tsvector(json_data::text)");
+ cond.setAttribute("jsonb_to_tsvector('english', json_data, '[\"all\"]')");
cond.setOperator("@@");
String[] values = {freeText};
cond.setValues(Arrays.asList(values));
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresQueueListener.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresQueueListener.java
new file mode 100644
index 000000000..e0a99beda
--- /dev/null
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresQueueListener.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2024 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.postgres.util;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Optional;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import javax.sql.DataSource;
+
+import org.postgresql.PGConnection;
+import org.postgresql.PGNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.netflix.conductor.core.exception.NonTransientException;
+import com.netflix.conductor.postgres.config.PostgresProperties;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public class PostgresQueueListener {
+
+ private PGConnection pgconn;
+
+ private volatile Connection conn;
+
+ private final Lock connectionLock = new ReentrantLock();
+
+ private DataSource dataSource;
+
+ private HashMap<String, QueueStats> queues;
+
+ private volatile boolean connected = false;
+
+ private long lastNotificationTime = 0;
+
+ private Integer stalePeriod;
+
+ protected final Logger logger = LoggerFactory.getLogger(getClass());
+
+ public PostgresQueueListener(DataSource dataSource, PostgresProperties properties) {
+ logger.info("Using experimental PostgresQueueListener");
+ this.dataSource = dataSource;
+ this.stalePeriod = properties.getExperimentalQueueNotifyStalePeriod();
+ connect();
+ }
+
+ public boolean hasMessagesReady(String queueName) {
+ checkUpToDate();
+ handleNotifications();
+ if (notificationIsStale() || !connected) {
+ connect();
+ return true;
+ }
+
+ QueueStats queueStats = queues.get(queueName);
+ if (queueStats == null) {
+ return false;
+ }
+
+ if (queueStats.getNextDelivery() > System.currentTimeMillis()) {
+ return false;
+ }
+
+ return true;
+ }
+
+ public Optional<Integer> getSize(String queueName) {
+ checkUpToDate();
+ handleNotifications();
+ if (notificationIsStale() || !connected) {
+ connect();
+ return Optional.empty();
+ }
+
+ QueueStats queueStats = queues.get(queueName);
+ if (queueStats == null) {
+ return Optional.of(0);
+ }
+
+ return Optional.of(queueStats.getDepth());
+ }
+
+ private boolean notificationIsStale() {
+ return System.currentTimeMillis() - lastNotificationTime > this.stalePeriod;
+ }
+
+ private void connect() {
+ // Attempt to acquire the lock without waiting.
+ if (!connectionLock.tryLock()) {
+ // If the lock is not available, return early.
+ return;
+ }
+
+ boolean newConnectedState = false;
+
+ try {
+ // Check if the connection is null or not valid.
+ if (conn == null || !conn.isValid(1)) {
+ // Close the old connection if it exists and is not valid.
+ if (conn != null) {
+ try {
+ conn.close();
+ } catch (Exception e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+
+ // Establish a new connection.
+ try {
+ this.conn = dataSource.getConnection();
+ this.pgconn = conn.unwrap(PGConnection.class);
+
+ boolean previousAutoCommitMode = conn.getAutoCommit();
+ conn.setAutoCommit(true);
+ try {
+ conn.prepareStatement("LISTEN conductor_queue_state").execute();
+ newConnectedState = true;
+ } catch (Throwable th) {
+ conn.rollback();
+ logger.error(th.getMessage());
+ } finally {
+ conn.setAutoCommit(previousAutoCommitMode);
+ }
+ requestStats();
+ } catch (SQLException e) {
+ throw new NonTransientException(e.getMessage(), e);
+ }
+ }
+ } catch (Exception e) {
+ throw new NonTransientException(e.getMessage(), e);
+ } finally {
+ connected = newConnectedState;
+ // Ensure the lock is always released.
+ connectionLock.unlock();
+ }
+ }
+
+ private void requestStats() {
+ try {
+ boolean previousAutoCommitMode = conn.getAutoCommit();
+ conn.setAutoCommit(true);
+ try {
+ conn.prepareStatement("SELECT queue_notify()").execute();
+ connected = true;
+ } catch (Throwable th) {
+ conn.rollback();
+ logger.error(th.getMessage());
+ } finally {
+ conn.setAutoCommit(previousAutoCommitMode);
+ }
+ } catch (SQLException e) {
+ if (!isSQLExceptionConnectionDoesNotExists(e)) {
+ logger.error("Error fetching notifications {}", e.getSQLState());
+ }
+ connect();
+ }
+ }
+
+ private void checkUpToDate() {
+ if (System.currentTimeMillis() - lastNotificationTime > this.stalePeriod * 0.75) {
+ requestStats();
+ }
+ }
+
+ private void handleNotifications() {
+ try {
+ PGNotification[] notifications = pgconn.getNotifications();
+ if (notifications == null || notifications.length == 0) {
+ return;
+ }
+ processPayload(notifications[notifications.length - 1].getParameter());
+ } catch (SQLException e) {
+ if (!isSQLExceptionConnectionDoesNotExists(e)) {
+ logger.error("Error fetching notifications {}", e.getSQLState());
+ }
+ connect();
+ }
+ }
+
+ private void processPayload(String payload) {
+ ObjectMapper objectMapper = new ObjectMapper();
+ try {
+ JsonNode notification = objectMapper.readTree(payload);
+ JsonNode lastNotificationTime = notification.get("__now__");
+ if (lastNotificationTime != null) {
+ this.lastNotificationTime = lastNotificationTime.asLong();
+ }
+ Iterator<String> iterator = notification.fieldNames();
+
+ HashMap<String, QueueStats> queueStats = new HashMap<>();
+ iterator.forEachRemaining(
+ key -> {
+ if (!key.equals("__now__")) {
+ try {
+ QueueStats stats =
+ objectMapper.treeToValue(
+ notification.get(key), QueueStats.class);
+ queueStats.put(key, stats);
+ } catch (JsonProcessingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ });
+ this.queues = queueStats;
+ } catch (JsonProcessingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private static boolean isSQLExceptionConnectionDoesNotExists(SQLException e) {
+ return "08003".equals(e.getSQLState());
+ }
+}
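The listener keeps a dedicated connection subscribed via `LISTEN conductor_queue_state` and drains buffered notifications with pgJDBC's non-blocking `getNotifications()`. A minimal sketch of that pgJDBC pattern, assuming an ordinary JDBC `Connection` to the same database:

```java
import java.sql.Connection;
import java.sql.SQLException;

import org.postgresql.PGConnection;
import org.postgresql.PGNotification;

class ListenSketch {
    static void pollOnce(Connection conn) throws SQLException {
        conn.prepareStatement("LISTEN conductor_queue_state").execute();
        PGConnection pgConn = conn.unwrap(PGConnection.class);
        // Non-blocking: returns only notifications buffered since the last call,
        // or null when nothing has arrived.
        PGNotification[] notifications = pgConn.getNotifications();
        if (notifications != null) {
            for (PGNotification n : notifications) {
                System.out.println(n.getName() + " -> " + n.getParameter());
            }
        }
    }
}
```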
diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueueStats.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueueStats.java
new file mode 100644
index 000000000..6cbb9cecd
--- /dev/null
+++ b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueueStats.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2024 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.postgres.util;
+
+public class QueueStats {
+ private Integer depth;
+
+ private long nextDelivery;
+
+ public void setDepth(Integer depth) {
+ this.depth = depth;
+ }
+
+ public Integer getDepth() {
+ return depth;
+ }
+
+ public void setNextDelivery(long nextDelivery) {
+ this.nextDelivery = nextDelivery;
+ }
+
+ public long getNextDelivery() {
+ return nextDelivery;
+ }
+
+ public String toString() {
+ return "{nextDelivery: " + nextDelivery + " depth: " + depth + "}";
+ }
+}
diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V10__poll_data_check.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V10__poll_data_check.sql
new file mode 100644
index 000000000..8bdbebe7c
--- /dev/null
+++ b/postgres-persistence/src/main/resources/db/migration_postgres/V10__poll_data_check.sql
@@ -0,0 +1,13 @@
+CREATE OR REPLACE FUNCTION poll_data_update_check ()
+ RETURNS TRIGGER
+ AS $$
+BEGIN
+ IF(NEW.json_data::json ->> 'lastPollTime')::BIGINT < (OLD.json_data::json ->> 'lastPollTime')::BIGINT THEN
+ RAISE EXCEPTION 'lastPollTime cannot be set to a lower value';
+ END IF;
+ RETURN NEW;
+END;
+$$
+LANGUAGE plpgsql;
+
+CREATE TRIGGER poll_data_update_check_trigger BEFORE UPDATE ON poll_data FOR EACH ROW EXECUTE FUNCTION poll_data_update_check ();
\ No newline at end of file
diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V11__locking.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V11__locking.sql
new file mode 100644
index 000000000..f2062d9c3
--- /dev/null
+++ b/postgres-persistence/src/main/resources/db/migration_postgres/V11__locking.sql
@@ -0,0 +1,4 @@
+CREATE TABLE IF NOT EXISTS locks (
+ lock_id VARCHAR PRIMARY KEY,
+ lease_expiration TIMESTAMP WITH TIME ZONE NOT NULL
+);
diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V12__task_index_columns.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V12__task_index_columns.sql
new file mode 100644
index 000000000..62697e767
--- /dev/null
+++ b/postgres-persistence/src/main/resources/db/migration_postgres/V12__task_index_columns.sql
@@ -0,0 +1,5 @@
+ALTER TABLE task_index
+ALTER COLUMN task_type TYPE TEXT;
+
+ALTER TABLE task_index
+ALTER COLUMN task_def_name TYPE TEXT;
diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V9__indexing_index_fix.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V9__indexing_index_fix.sql
new file mode 100644
index 000000000..410d01bd0
--- /dev/null
+++ b/postgres-persistence/src/main/resources/db/migration_postgres/V9__indexing_index_fix.sql
@@ -0,0 +1,12 @@
+-- Drop the unused text index on the json_data column
+DROP INDEX CONCURRENTLY IF EXISTS workflow_index_json_data_text_idx;
+-- Create a new index to enable querying the json by attribute and value
+CREATE INDEX CONCURRENTLY IF NOT EXISTS workflow_index_json_data_gin_idx ON workflow_index USING GIN (json_data jsonb_path_ops);
+
+-- Drop the incorrectly created indices on the workflow_index that should be on the task_index table
+DROP INDEX CONCURRENTLY IF EXISTS task_index_json_data_json_idx;
+DROP INDEX CONCURRENTLY IF EXISTS task_index_json_data_text_idx;
+-- Create the full text index on the json_data column of the task_index table
+CREATE INDEX CONCURRENTLY IF NOT EXISTS task_index_json_data_fulltext_idx ON task_index USING GIN (jsonb_to_tsvector('english', json_data, '["all"]'));
+-- Create a new index to enable querying the json by attribute and value
+CREATE INDEX CONCURRENTLY IF NOT EXISTS task_index_json_data_gin_idx ON task_index USING GIN (json_data jsonb_path_ops);
diff --git a/postgres-persistence/src/main/resources/db/migration_postgres_notify/V10.1__notify.sql b/postgres-persistence/src/main/resources/db/migration_postgres_notify/V10.1__notify.sql
new file mode 100644
index 000000000..7d40d6eda
--- /dev/null
+++ b/postgres-persistence/src/main/resources/db/migration_postgres_notify/V10.1__notify.sql
@@ -0,0 +1,59 @@
+-- This function notifies on 'conductor_queue_state' with a JSON string containing
+-- queue metadata that looks like:
+-- {
+-- "queue_name_1": {
+-- "nextDelivery": 1234567890123,
+-- "depth": 10
+-- },
+-- "queue_name_2": {
+-- "nextDelivery": 1234567890456,
+-- "depth": 5
+-- },
+-- "__now__": 1234567890999
+-- }
+--
+CREATE OR REPLACE FUNCTION queue_notify() RETURNS void
+LANGUAGE SQL
+AS $$
+ SELECT pg_notify('conductor_queue_state', (
+ SELECT
+ COALESCE(jsonb_object_agg(KEY, val), '{}'::jsonb) ||
+ jsonb_build_object('__now__', (extract('epoch' from CURRENT_TIMESTAMP)*1000)::bigint)
+ FROM (
+ SELECT
+ queue_name AS KEY,
+ jsonb_build_object(
+ 'nextDelivery',
+ (extract('epoch' from min(deliver_on))*1000)::bigint,
+ 'depth',
+ count(*)
+ ) AS val
+ FROM
+ queue_message
+ WHERE
+ popped = FALSE
+ GROUP BY
+ queue_name) AS sq)::text);
+$$;
+
+
+CREATE FUNCTION queue_notify_trigger()
+ RETURNS TRIGGER
+ LANGUAGE PLPGSQL
+AS $$
+BEGIN
+ PERFORM queue_notify();
+ RETURN NULL;
+END;
+$$;
+
+CREATE TRIGGER queue_update
+ AFTER UPDATE ON queue_message
+ FOR EACH ROW
+ WHEN (OLD.popped IS DISTINCT FROM NEW.popped)
+ EXECUTE FUNCTION queue_notify_trigger();
+
+CREATE TRIGGER queue_insert_delete
+ AFTER INSERT OR DELETE ON queue_message
+ FOR EACH ROW
+ EXECUTE FUNCTION queue_notify_trigger();
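On the consumer side, the payload documented above decodes into per-queue stats plus the `__now__` server clock, which lets listeners detect stale snapshots without trusting their own clocks. A Jackson sketch of that decoding, mirroring what `processPayload` does but with a simpler, hypothetical result type:

```java
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

class QueueStatePayloadSketch {
    static Map<String, JsonNode> parse(String payload) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode root = mapper.readTree(payload);
        long serverNow = root.path("__now__").asLong(); // server-side timestamp, epoch ms
        Map<String, JsonNode> queues = new HashMap<>();
        root.fieldNames()
                .forEachRemaining(
                        name -> {
                            if (!"__now__".equals(name)) {
                                // each entry: {"nextDelivery": <epoch ms>, "depth": <count>}
                                queues.put(name, root.get(name));
                            }
                        });
        System.out.println("server time=" + serverNow + ", queues=" + queues);
        return queues;
    }
}
```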
diff --git a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOStatusChangeOnlyTest.java b/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOStatusChangeOnlyTest.java
new file mode 100644
index 000000000..80811e80e
--- /dev/null
+++ b/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOStatusChangeOnlyTest.java
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2023 Conductor Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+package com.netflix.conductor.postgres.dao;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.*;
+
+import javax.sql.DataSource;
+
+import org.flywaydb.core.Flyway;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.TestPropertySource;
+import org.springframework.test.context.junit4.SpringRunner;
+
+import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
+import com.netflix.conductor.common.metadata.tasks.Task;
+import com.netflix.conductor.common.run.TaskSummary;
+import com.netflix.conductor.common.run.Workflow;
+import com.netflix.conductor.common.run.WorkflowSummary;
+import com.netflix.conductor.postgres.config.PostgresConfiguration;
+import com.netflix.conductor.postgres.util.Query;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import static org.junit.Assert.assertEquals;
+
+@ContextConfiguration(
+ classes = {
+ TestObjectMapperConfiguration.class,
+ PostgresConfiguration.class,
+ FlywayAutoConfiguration.class
+ })
+@RunWith(SpringRunner.class)
+@TestPropertySource(
+ properties = {
+ "conductor.app.asyncIndexingEnabled=false",
+ "conductor.elasticsearch.version=0",
+ "conductor.indexing.type=postgres",
+ "conductor.postgres.onlyIndexOnStatusChange=true",
+ "spring.flyway.clean-disabled=false"
+ })
+@SpringBootTest
+public class PostgresIndexDAOStatusChangeOnlyTest {
+
+ @Autowired private PostgresIndexDAO indexDAO;
+
+ @Autowired private ObjectMapper objectMapper;
+
+ @Qualifier("dataSource")
+ @Autowired
+ private DataSource dataSource;
+
+ @Autowired Flyway flyway;
+
+ // clean the database between tests.
+ @Before
+ public void before() {
+ flyway.migrate();
+ }
+
+ private WorkflowSummary getMockWorkflowSummary(String id) {
+ WorkflowSummary wfs = new WorkflowSummary();
+ wfs.setWorkflowId(id);
+ wfs.setCorrelationId("correlation-id");
+ wfs.setWorkflowType("workflow-type");
+ wfs.setStartTime("2023-02-07T08:42:45Z");
+ wfs.setStatus(Workflow.WorkflowStatus.RUNNING);
+ return wfs;
+ }
+
+ private TaskSummary getMockTaskSummary(String taskId) {
+ TaskSummary ts = new TaskSummary();
+ ts.setTaskId(taskId);
+ ts.setTaskType("task-type");
+ ts.setTaskDefName("task-def-name");
+ ts.setStatus(Task.Status.SCHEDULED);
+ ts.setStartTime("2023-02-07T09:41:45Z");
+ ts.setUpdateTime("2023-02-07T09:42:45Z");
+ ts.setWorkflowType("workflow-type");
+ return ts;
+ }
+
+ private List