diff --git a/src/autoconf/Config.py b/src/autoconf/Config.py
index 3ce254d9c3..49fc8f7ef0 100644
--- a/src/autoconf/Config.py
+++ b/src/autoconf/Config.py
@@ -101,7 +101,7 @@ def wait_applying(self, startup: bool = False):
             raise Exception("Too many retries while waiting for scheduler to apply configuration...")
 
     def apply(
-        self, instances: List[Dict[str, Any]], services: List[Dict[str, str]], configs: Optional[Dict[str, Dict[str, bytes]]] = None, first: bool = False
+        self, instances: List[Dict[str, Any]], services: List[Dict[str, str]], configs: Optional[Dict[str, Dict[str, bytes]]] = None, first: bool = False, global_config: Optional[Dict[str, str]] = None
     ) -> bool:
         success = True
 
@@ -127,9 +127,12 @@ def apply(
         if configs != self.__configs or first:
             self.__configs = configs
             changes.append("custom_configs")
-        if "instances" in changes or "services" in changes:
+
+        if "instances" in changes or "services" in changes or global_config:
             old_env = deepcopy(self.__config)
             new_env = self.__get_full_env()
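+            # merge the provided global config on top of the computed environment before comparing it with the previous one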
+            if global_config:
+                new_env.update(global_config)
             if old_env != new_env or first:
                 self.__config = new_env
                 changes.append("config")
diff --git a/src/autoconf/IngressController.py b/src/autoconf/IngressController.py
index 865f279961..fa93fe31a5 100644
--- a/src/autoconf/IngressController.py
+++ b/src/autoconf/IngressController.py
@@ -1,6 +1,9 @@
 #!/usr/bin/env python3
 
 from time import sleep
+from os import getenv
+import copy
+import base64
 from traceback import format_exc
 from typing import List
 from kubernetes import client, config, watch
@@ -9,7 +12,6 @@
 
 from Controller import Controller
 
-
 class IngressController(Controller):
     def __init__(self):
         self.__internal_lock = Lock()
@@ -17,6 +19,8 @@ def __init__(self):
         config.load_incluster_config()
         self.__corev1 = client.CoreV1Api()
         self.__networkingv1 = client.NetworkingV1Api()
+        self.global_config_data = {}  # global config currently applied, built from annotated Secrets and ConfigMaps
+        self.next_global_config_data = {}  # candidate global config fetched on each event, promoted after a successful apply
 
     def _get_controller_instances(self) -> list:
         return [
@@ -30,9 +34,9 @@ def _to_instances(self, controller_instance) -> List[dict]:
         instance["name"] = controller_instance.metadata.name
         instance["hostname"] = controller_instance.status.pod_ip or controller_instance.metadata.name
         health = False
-        if controller_instance.status.conditions:
-            for condition in controller_instance.status.conditions:
-                if condition.type == "Ready" and condition.status == "True":
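+        # the instance is healthy when its "bunkerweb" container is running and not terminated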
+        if controller_instance.status.container_statuses:
+            for container in controller_instance.status.container_statuses:
+                if container.name == "bunkerweb" and container.state.running is not None and container.state.terminated is None:
                     health = True
                     break
         instance["health"] = health
@@ -47,15 +51,7 @@ def _to_instances(self, controller_instance) -> List[dict]:
         else:
             for env in pod.env:
                 instance["env"][env.name] = env.value or ""
-        for controller_service in self._get_controller_services():
-            if controller_service.metadata.annotations:
-                for (
-                    annotation,
-                    value,
-                ) in controller_service.metadata.annotations.items():
-                    if not annotation.startswith("bunkerweb.io/"):
-                        continue
-                    instance["env"][annotation.replace("bunkerweb.io/", "", 1)] = value
+
         return [instance]
 
     def _get_controller_services(self) -> list:
@@ -66,6 +62,8 @@ def _to_services(self, controller_service) -> List[dict]:
             return []
         namespace = controller_service.metadata.namespace
         services = []
+        if controller_service.metadata.annotations is None or "bunkerweb.io" not in controller_service.metadata.annotations:
+            return []
         # parse rules
         for rule in controller_service.spec.rules:
             if not rule.host:
@@ -79,45 +77,13 @@ def _to_services(self, controller_service) -> List[dict]:
                 services.append(service)
                 continue
             location = 1
-            for path in rule.http.paths:
-                if not path.path:
-                    self._logger.warning(
-                        "Ignoring unsupported ingress rule without path.",
-                    )
-                    continue
-                elif not path.backend.service:
-                    self._logger.warning(
-                        "Ignoring unsupported ingress rule without backend service.",
-                    )
-                    continue
-                elif not path.backend.service.port:
-                    self._logger.warning(
-                        "Ignoring unsupported ingress rule without backend service port.",
-                    )
-                    continue
-                elif not path.backend.service.port.number:
-                    self._logger.warning(
-                        "Ignoring unsupported ingress rule without backend service port number.",
-                    )
-                    continue
-
-                service_list = self.__corev1.list_service_for_all_namespaces(
-                    watch=False,
-                    field_selector=f"metadata.name={path.backend.service.name},metadata.namespace={namespace}",
-                ).items
-
-                if not service_list:
-                    self._logger.warning(
-                        f"Ignoring ingress rule with service {path.backend.service.name} : service not found.",
-                    )
-                    continue
-
-                reverse_proxy_host = f"http://{path.backend.service.name}.{namespace}.svc.cluster.local:{path.backend.service.port.number}"
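+            # every matched host is now proxied to the local backend on localhost:80 instead of the resolved ingress backend service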
+            if rule.http.paths:
+                reverse_proxy_host = "http://localhost:80"
                 service.update(
                     {
                         "USE_REVERSE_PROXY": "yes",
                         f"REVERSE_PROXY_HOST_{location}": reverse_proxy_host,
-                        f"REVERSE_PROXY_URL_{location}": path.path,
+                        f"REVERSE_PROXY_URL_{location}": "/",
                     }
                 )
                 location += 1
@@ -132,12 +98,12 @@ def _to_services(self, controller_service) -> List[dict]:
                 ) in controller_service.metadata.annotations.items():
                     if not annotation.startswith("bunkerweb.io/"):
                         continue
-
                     variable = annotation.replace("bunkerweb.io/", "", 1)
                     server_name = service["SERVER_NAME"].strip().split(" ")[0]
                     if not variable.startswith(f"{server_name}_"):
-                        continue
-                    service[variable.replace(f"{server_name}_", "", 1)] = value
+                        service[variable] = value
+                    else:
+                        service[variable.replace(f"{server_name}_", "", 1)] = value
 
         # parse tls
         if controller_service.spec.tls:
@@ -200,6 +166,38 @@ def get_configs(self) -> dict:
                 configs[config_type][f"{config_site}{config_name}"] = config_data
         return configs
 
+    def get_global_config_data(self):
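+        """Collect global settings from ConfigMaps and Secrets annotated with bunkerweb.io/GLOBAL_CONFIG in the NAMESPACE namespace."""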
+        namespace = getenv("NAMESPACE", "default")
+        global_config_data = {}
+        for configMap in self.__corev1.list_namespaced_config_map(watch=False, namespace=namespace).items:
+            if not configMap.metadata.annotations or "bunkerweb.io/GLOBAL_CONFIG" not in configMap.metadata.annotations:
+                continue
+            if not configMap.data:
+                self._logger.warning(
+                    f"Ignoring blank ConfigMap {configMap.metadata.name}",
+                )
+                continue
+            for config_name, config_value in configMap.data.items():
+                self._logger.debug(
+                    f"Get configMap {config_name}={config_value} from {configMap.metadata.name}",
+                )
+                global_config_data[config_name] = config_value
+        for secret in self.__corev1.list_namespaced_secret(watch=False, namespace=namespace).items:
+            if not secret.metadata.annotations or "bunkerweb.io/GLOBAL_CONFIG" not in secret.metadata.annotations:
+                continue
+            if not secret.data:
+                self._logger.warning(
+                    f"Ignoring blank secret {secret.metadata.name}",
+                )
+                continue
+            for key, value in secret.data.items():
+                decoded_value = base64.b64decode(value).decode("utf-8")
+                self._logger.debug(
+                    f"Get secret {key} from {secret.metadata.name}",
+                )
+                global_config_data[key] = decoded_value
+        return global_config_data
+
     def __process_event(self, event):
         obj = event["object"]
         metadata = obj.metadata if obj else None
@@ -210,12 +208,16 @@ def __process_event(self, event):
         if obj.kind == "Pod":
             return annotations and "bunkerweb.io/INSTANCE" in annotations
         if obj.kind == "Ingress":
-            return True
+            return annotations and "bunkerweb.io" in annotations
         if obj.kind == "ConfigMap":
-            return annotations and "bunkerweb.io/CONFIG_TYPE" in annotations
+            if annotations is None:
+                return False
+            return "bunkerweb.io/CONFIG_TYPE" in annotations or "bunkerweb.io/GLOBAL_CONFIG" in annotations
         if obj.kind == "Service":
             return True
         if obj.kind == "Secret":
+            if annotations and "bunkerweb.io/GLOBAL_CONFIG" in annotations:
+                return True
             return data and "tls.crt" in data and "tls.key" in data
         return False
 
@@ -256,8 +258,8 @@ def __watch(self, watch_type):
                         self._instances = self.get_instances()
                         self._services = self.get_services()
                         self._configs = self.get_configs()
-
-                        if not to_apply and not self.update_needed(self._instances, self._services, configs=self._configs):
+                        self.next_global_config_data = self.get_global_config_data()
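+                        # also re-apply when the annotated global config changed since the last successful deployment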
+                        if not to_apply and not self.update_needed(self._instances, self._services, configs=self._configs) and self.next_global_config_data == self.global_config_data:
                             if locked:
                                 self.__internal_lock.release()
                                 locked = False
@@ -276,7 +278,7 @@ def __watch(self, watch_type):
                                 self._logger.error("Error while deploying new configuration ...")
                             else:
                                 self._logger.info("Successfully deployed new configuration 🚀")
-
+                                self.global_config_data = copy.deepcopy(self.next_global_config_data)
                                 self._set_autoconf_load_db()
                         except:
                             self._logger.error(f"Exception while deploying new configuration :\n{format_exc()}")
@@ -311,6 +313,7 @@ def apply_config(self) -> bool:
             self._services,
             configs=self._configs,
             first=not self._loaded,
+            global_config=self.next_global_config_data,
         )
 
     def process_events(self):
diff --git a/src/autoconf/main.py b/src/autoconf/main.py
index 626bc65e0e..b3428f7a6d 100644
--- a/src/autoconf/main.py
+++ b/src/autoconf/main.py
@@ -15,6 +15,11 @@
 from SwarmController import SwarmController
 from IngressController import IngressController
 from DockerController import DockerController
+import uuid
+from kubernetes import config
+from kubernetes.leaderelection import leaderelection
+from kubernetes.leaderelection.resourcelock.configmaplock import ConfigMapLock
+from kubernetes.leaderelection import electionconfig
 
 # Get variables
 logger = setup_logger("Autoconf", getenv("LOG_LEVEL", "INFO"))
@@ -22,6 +27,9 @@
 kubernetes = getenv("KUBERNETES_MODE", "no").lower() == "yes"
 docker_host = getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
 wait_retry_interval = getenv("WAIT_RETRY_INTERVAL", "5")
+namespace = getenv("NAMESPACE", "default")
+pod_name = getenv("POD_NAME", f"auto-{uuid.uuid4()}")
+
 
 if not wait_retry_interval.isdigit():
     logger.error("Invalid WAIT_RETRY_INTERVAL value, must be an integer")
@@ -38,19 +46,37 @@ def exit_handler(signum, frame):
 signal(SIGINT, exit_handler)
 signal(SIGTERM, exit_handler)
 
-try:
-    # Instantiate the controller
-    if swarm:
-        logger.info("Swarm mode detected")
-        controller = SwarmController(docker_host)
-    elif kubernetes:
-        logger.info("Kubernetes mode detected")
-        controller = IngressController()
-    else:
-        logger.info("Docker mode detected")
-        controller = DockerController(docker_host)
-
-    # Wait for instances
+
+def run_on_kubernetes_ha_mode():
+    # Authenticate using config file
+    config.load_incluster_config()
+    lock_name = "autoconfig-election"
+
+    election_config = electionconfig.Config(
+        ConfigMapLock(lock_name, namespace, pod_name),
+        lease_duration=17,
+        renew_deadline=15,
+        retry_period=5,
+        onstarted_leading=kubernetes_start,
+        onstopped_leading=onstopped_leading
+    )
+    logger.info(f"Pod {pod_name} is competing for lock {lock_name} in namespace {namespace}")
+    # Enter leader election
+    leaderelection.LeaderElection(election_config).run()
+
+
+def onstopped_leading():
+    logger.info(f"{pod_name} stopped leading, exiting with code 0")
+    _exit(0)
+
+
+def kubernetes_start():
+    logger.info(f'{pod_name} is leader')
+    controller = IngressController()
+    start(controller=controller)
+
+
+def start(controller):
     logger.info("Waiting for BunkerWeb instances ...")
     instances = controller.wait(wait_retry_interval)
     logger.info("BunkerWeb instances are ready 🚀")
@@ -65,8 +91,29 @@ def exit_handler(signum, frame):
     Path(sep, "var", "tmp", "bunkerweb", "autoconf.healthy").write_text("ok")
     logger.info("Processing events ...")
     controller.process_events()
-except:
-    logger.error(f"Exception while running autoconf :\n{format_exc()}")
-    sys_exit(1)
-finally:
-    Path(sep, "var", "tmp", "bunkerweb", "autoconf.healthy").unlink(missing_ok=True)
+
+
+def start_server():
+    try:
+        # Instantiate the controller
+        if kubernetes:
+            run_on_kubernetes_ha_mode()
+        elif swarm:
+            logger.info("Swarm mode detected")
+            controller = SwarmController(docker_host)
+        else:
+            logger.info("Docker mode detected")
+            controller = DockerController(docker_host)
+
+        if not kubernetes:
+            start(controller=controller)
+
+    except:
+        logger.error(f"Exception while running autoconf :\n{format_exc()}")
+        sys_exit(1)
+    finally:
+        Path(sep, "var", "tmp", "bunkerweb", "autoconf.healthy").unlink(missing_ok=True)
+
+
+if __name__ == '__main__':
+    start_server()
\ No newline at end of file
diff --git a/src/bw/Dockerfile b/src/bw/Dockerfile
index e8dbbc3d21..82de441c7b 100644
--- a/src/bw/Dockerfile
+++ b/src/bw/Dockerfile
@@ -19,11 +19,13 @@ WORKDIR /usr/share/bunkerweb
 # Copy python requirements
 COPY src/deps/requirements.txt /tmp/requirements-deps.txt
 COPY src/common/gen/requirements.txt deps/requirements-gen.txt
+COPY src/common/db/requirements.txt deps/requirements-db.txt
 
 # Install python requirements
 RUN export MAKEFLAGS="-j$(nproc)" && \
 	pip install --break-system-packages --no-cache-dir --require-hashes --ignore-installed -r /tmp/requirements-deps.txt && \
-	pip install --break-system-packages --no-cache-dir --require-hashes --target deps/python -r deps/requirements-gen.txt
+	pip install --break-system-packages --no-cache-dir --require-hashes --target deps/python -r deps/requirements-gen.txt && \
+	pip install --break-system-packages --no-cache-dir --require-hashes --target deps/python -r deps/requirements-db.txt
 
 # Copy files
 # can't exclude deps from . so we are copying everything by hand
@@ -36,6 +38,7 @@ COPY src/common/cli cli
 COPY src/common/confs confs
 COPY src/common/core core
 COPY src/common/gen gen
+COPY src/common/db db
 COPY src/common/helpers helpers
 COPY src/common/settings.json settings.json
 COPY src/common/utils utils
diff --git a/src/bw/entrypoint.sh b/src/bw/entrypoint.sh
index 3eab5b7c63..7f773d633f 100644
--- a/src/bw/entrypoint.sh
+++ b/src/bw/entrypoint.sh
@@ -44,7 +44,7 @@ function trap_reload() {
 trap "trap_reload" HUP
 
 # generate "temp" config
-echo -e "IS_LOADING=yes\nUSE_BUNKERNET=no\nSEND_ANONYMOUS_REPORT=no\nSERVER_NAME=\nMODSECURITY_CRS_VERSION=${MODSECURITY_CRS_VERSION:-4}\nAPI_HTTP_PORT=${API_HTTP_PORT:-5000}\nAPI_SERVER_NAME=${API_SERVER_NAME:-bwapi}\nAPI_WHITELIST_IP=${API_WHITELIST_IP:-127.0.0.0/8}\nUSE_REAL_IP=${USE_REAL_IP:-no}\nUSE_PROXY_PROTOCOL=${USE_PROXY_PROTOCOL:-no}\nREAL_IP_FROM=${REAL_IP_FROM:-192.168.0.0/16 172.16.0.0/12 10.0.0.0/8}\nREAL_IP_HEADER=${REAL_IP_HEADER:-X-Forwarded-For}\nHTTP_PORT=${HTTP_PORT:-8080}\nHTTPS_PORT=${HTTPS_PORT:-8443}" > /tmp/variables.env
+echo -e "IS_LOADING=yes\nUSE_BUNKERNET=no\nSEND_ANONYMOUS_REPORT=no\nSERVER_NAME=\nMODSECURITY_CRS_VERSION=${MODSECURITY_CRS_VERSION:-4}\nAPI_HTTP_PORT=${API_HTTP_PORT:-5000}\nAPI_SERVER_NAME=${API_SERVER_NAME:-bwapi}\nAPI_WHITELIST_IP=${API_WHITELIST_IP:-127.0.0.0/8}\nUSE_REAL_IP=${USE_REAL_IP:-no}\nUSE_PROXY_PROTOCOL=${USE_PROXY_PROTOCOL:-no}\nREAL_IP_FROM=${REAL_IP_FROM:-192.168.0.0/16 172.16.0.0/12 10.0.0.0/8}\nREAL_IP_HEADER=${REAL_IP_HEADER:-X-Forwarded-For}\nHTTP_PORT=${HTTP_PORT:-8080}\nHTTPS_PORT=${HTTPS_PORT:-8843}" > /tmp/variables.env
 python3 /usr/share/bunkerweb/gen/main.py --variables /tmp/variables.env
 
 # start nginx
diff --git a/src/common/confs/default-server-http.conf b/src/common/confs/default-server-http.conf
index 768c7951eb..83015a5dd5 100644
--- a/src/common/confs/default-server-http.conf
+++ b/src/common/confs/default-server-http.conf
@@ -97,6 +97,25 @@ server {
 				})
 		}
 	}
+{% else +%}
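+	# proxy requests hitting the default server to the local backend on localhost:80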
+	location / {
+		etag off;
+		proxy_pass "http://localhost:80";
+		proxy_set_header Host $host;
+		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+		proxy_set_header X-Real-IP $remote_addr;
+		proxy_set_header X-Forwarded-Proto $scheme;
+		proxy_set_header X-Forwarded-Protocol $scheme;
+		proxy_set_header X-Forwarded-Host $http_host;
+
+		proxy_set_header X-Forwarded-Prefix "/";
+
+		proxy_buffering on;
+
+		proxy_connect_timeout 60s;
+		proxy_read_timeout 600s;
+		proxy_send_timeout 600s;
+	}
 {% endif %}
 
 	# include core and plugins default-server configurations
@@ -189,5 +208,4 @@ server {
 		logger:log(INFO, "log_default phase ended")
 
 	}
-
 }
diff --git a/src/common/confs/healthcheck.conf b/src/common/confs/healthcheck.conf
index 2870135351..422d393b33 100644
--- a/src/common/confs/healthcheck.conf
+++ b/src/common/confs/healthcheck.conf
@@ -15,6 +15,12 @@ server {
 		}
 	}
 
+	location /nginx_status {
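+		# expose nginx stub_status metrics to loopback clients only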
+		stub_status on;
+		allow 127.0.0.1;
+		deny all;
+	}
+
 	# disable logging
 	access_log off;
 
diff --git a/src/common/confs/http-modsec-crs/http-http3.conf b/src/common/confs/http-modsec-crs/http-http3.conf
new file mode 100644
index 0000000000..21d1d118ab
--- /dev/null
+++ b/src/common/confs/http-modsec-crs/http-http3.conf
@@ -0,0 +1,9 @@
+{% if USE_MODSECURITY == "yes" and MODSECURITY_CRS_VERSION == "3" and HTTP3 == "yes" +%}
+SecAction \
+"id:900230,\
+ phase:1,\
+ nolog,\
+ pass,\
+ t:none,\
+ setvar:'tx.allowed_http_versions=HTTP/1.0 HTTP/1.1 HTTP/2 HTTP/2.0 HTTP/3 HTTP/3.0'"
+{% endif %}
diff --git a/src/common/confs/http-modsecurity/http-modsecurity.conf b/src/common/confs/http-modsecurity/http-modsecurity.conf
new file mode 100644
index 0000000000..4c1860de62
--- /dev/null
+++ b/src/common/confs/http-modsecurity/http-modsecurity.conf
@@ -0,0 +1,4 @@
+{% if USE_MODSECURITY == "yes" +%}
+modsecurity on;
+modsecurity_rules_file /etc/nginx/http-modsecurity/modsecurity-rules.conf.modsec;
+{% endif %}
\ No newline at end of file
diff --git a/src/common/confs/http-modsecurity/modsecurity-rules.conf.modsec b/src/common/confs/http-modsecurity/modsecurity-rules.conf.modsec
new file mode 100644
index 0000000000..313ee082c9
--- /dev/null
+++ b/src/common/confs/http-modsecurity/modsecurity-rules.conf.modsec
@@ -0,0 +1,139 @@
+{% set os_path = import("os.path") %}
+# process rules with disruptive actions
+SecRuleEngine {{ MODSECURITY_SEC_RULE_ENGINE }}
+
+# allow body checks
+SecRequestBodyAccess On
+
+# enable XML parsing
+SecRule REQUEST_HEADERS:Content-Type "(?:application(?:/soap\+|/)|text/)xml" \
+     "id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML"
+
+# enable JSON parsing
+SecRule REQUEST_HEADERS:Content-Type "application/json" \
+     "id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON"
+
+# maximum data size
+{% if MAX_CLIENT_SIZE.endswith("k") or MAX_CLIENT_SIZE.endswith("K") %}
+SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 }}
+{% elif MAX_CLIENT_SIZE.endswith("m") or MAX_CLIENT_SIZE.endswith("M") %}
+SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 * 1024 }}
+{% elif MAX_CLIENT_SIZE.endswith("g") or MAX_CLIENT_SIZE.endswith("G") %}
+SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 * 1024 * 1024 }}
+{% elif MAX_CLIENT_SIZE.isdigit() %}
+SecRequestBodyLimit {{ MAX_CLIENT_SIZE }}
+{% else %}
+SecRequestBodyLimit 13107200
+{% endif %}
+SecRequestBodyNoFilesLimit 131072
+
+# reject requests if bigger than max data size
+SecRequestBodyLimitAction Reject
+
+# reject if we can't process the body
+SecRule REQBODY_ERROR "!@eq 0" \
+"id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2"
+
+# be strict with multipart/form-data body
+SecRule MULTIPART_STRICT_ERROR "!@eq 0" \
+"id:'200003',phase:2,t:none,log,deny,status:400, \
+msg:'Multipart request body failed strict validation: \
+PE %{REQBODY_PROCESSOR_ERROR}, \
+BQ %{MULTIPART_BOUNDARY_QUOTED}, \
+BW %{MULTIPART_BOUNDARY_WHITESPACE}, \
+DB %{MULTIPART_DATA_BEFORE}, \
+DA %{MULTIPART_DATA_AFTER}, \
+HF %{MULTIPART_HEADER_FOLDING}, \
+LF %{MULTIPART_LF_LINE}, \
+SM %{MULTIPART_MISSING_SEMICOLON}, \
+IQ %{MULTIPART_INVALID_QUOTING}, \
+IP %{MULTIPART_INVALID_PART}, \
+IH %{MULTIPART_INVALID_HEADER_FOLDING}, \
+FL %{MULTIPART_FILE_LIMIT_EXCEEDED}'"
+SecRule MULTIPART_UNMATCHED_BOUNDARY "@eq 1" \
+    "id:'200004',phase:2,t:none,log,deny,msg:'Multipart parser detected a possible unmatched boundary.'"
+
+# enable response body checks
+SecResponseBodyAccess On
+SecResponseBodyMimeType text/plain text/html text/xml application/json
+SecResponseBodyLimit 524288
+SecResponseBodyLimitAction ProcessPartial
+
+# log useful stuff
+SecAuditEngine {{ MODSECURITY_SEC_AUDIT_ENGINE }}
+SecAuditLogParts {{ MODSECURITY_SEC_AUDIT_LOG_PARTS }}
+SecAuditLogType Serial
+SecAuditLog /var/log/bunkerweb/modsec_audit.log
+
+# include OWASP CRS configurations
+{% if USE_MODSECURITY_CRS == "yes" %}
+{% if MODSECURITY_CRS_VERSION == "nightly" %}
+{% if os_path.isfile("/var/cache/bunkerweb/modsecurity/crs/crs-setup-nightly.conf") %}
+include /var/cache/bunkerweb/modsecurity/crs/crs-setup-nightly.conf
+{% else %}
+# fallback to the default CRS setup as the nightly one is not available
+include /usr/share/bunkerweb/core/modsecurity/files/crs-setup-v3.conf
+{% endif %}
+{% else %}
+include /usr/share/bunkerweb/core/modsecurity/files/crs-setup-v{{ MODSECURITY_CRS_VERSION }}.conf
+{% endif %}
+
+# custom CRS configurations before loading rules (e.g. exclusions)
+{% if is_custom_conf("/etc/bunkerweb/configs/modsec-crs") %}
+include /etc/bunkerweb/configs/modsec-crs/*.conf
+{% endif %}
+{% if is_custom_conf("/etc/nginx/modsec-crs") %}
+include /etc/nginx/modsec-crs/*.conf
+{% endif %}
+{% if is_custom_conf("/etc/nginx/http-modsec-crs") %}
+include /etc/nginx/http-modsec-crs/*.conf
+{% endif %}
+# unset REASON env var
+SecAction "nolog,phase:1,setenv:REASON=none"
+
+# Auto update allowed methods
+SecAction \
+ "id:900200,\
+  phase:1,\
+  nolog,\
+  pass,\
+  t:none,\
+  setvar:'tx.allowed_methods={{ ALLOWED_METHODS.replace("|", " ") }}'"
+
+# Check if client is whitelisted
+{% if USE_WHITELIST == "yes" +%}
+SecRule ENV:is_whitelisted "yes" "id:1000,phase:1,allow,nolog,ctl:ruleEngine=Off"
+{% endif +%}
+
+# include OWASP CRS rules
+{% if MODSECURITY_CRS_VERSION == "nightly" %}
+{% if os_path.exists("/var/cache/bunkerweb/modsecurity/crs/crs-nightly/rules") %}
+include /var/cache/bunkerweb/modsecurity/crs/crs-nightly/rules/*.conf
+{% else %}
+# fallback to the default CRS setup as the nightly one is not available
+include /usr/share/bunkerweb/core/modsecurity/files/coreruleset-v3/rules/*.conf
+{% endif %}
+{% else %}
+include /usr/share/bunkerweb/core/modsecurity/files/coreruleset-v{{ MODSECURITY_CRS_VERSION }}/rules/*.conf
+{% endif %}
+{% endif +%}
+
+# custom rules after loading the CRS
+{% if is_custom_conf("/etc/bunkerweb/configs/modsec") %}
+include /etc/bunkerweb/configs/modsec/*.conf
+{% endif %}
+{% if is_custom_conf("/etc/nginx/modsec") %}
+include /etc/nginx/modsec/*.conf
+{% endif %}
+
+
+{% if USE_MODSECURITY_CRS == "yes" %}
+
+# set REASON env var
+SecRuleUpdateActionById 949110 "t:none,deny,status:{{ DENY_HTTP_STATUS }},setenv:REASON=modsecurity"
+SecRuleUpdateActionById 959100 "t:none,deny,status:{{ DENY_HTTP_STATUS }},setenv:REASON=modsecurity"
+
+# let BW manage when method is not allowed (and save up some computing)
+SecRuleUpdateActionById 911100 "t:none,allow,nolog"
+
+{% endif %}
diff --git a/src/common/confs/http.conf b/src/common/confs/http.conf
index d734c5f444..8e775c54b9 100644
--- a/src/common/confs/http.conf
+++ b/src/common/confs/http.conf
@@ -33,6 +33,9 @@ client_header_timeout 10;
 keepalive_timeout 15;
 send_timeout 10;
 
+server_names_hash_bucket_size   128;
+server_names_hash_max_size      1024;
+
 # resolvers to use
 resolver {{ DNS_RESOLVERS }} {% if USE_IPV6 == "no" %}ipv6=off{% endif %};
 
@@ -75,6 +78,9 @@ include /etc/nginx/default-server-http.conf;
 # disable sending nginx version globally
 server_tokens off;
 
+# global modsecurity config
+include /etc/nginx/http-modsecurity/*.conf;
+
 # server config(s)
 {% if MULTISITE == "yes" and SERVER_NAME != "" %}
 	{% set map_servers = {} %}
diff --git a/src/common/core/misc/confs/default-server-http/disable.conf b/src/common/core/misc/confs/default-server-http/disable.conf
index 96fa324fae..446838bedc 100644
--- a/src/common/core/misc/confs/default-server-http/disable.conf
+++ b/src/common/core/misc/confs/default-server-http/disable.conf
@@ -1,10 +1,3 @@
-{% if DISABLE_DEFAULT_SERVER == "yes" +%}
-location / {
-	set $reason "default";
-	set $reason_data "";
-	return {{ DENY_HTTP_STATUS }};
-}
-{% endif %}
 {% if DISABLE_DEFAULT_SERVER_STRICT_SNI == "yes" +%}
 ssl_client_hello_by_lua_block {
 	local ssl_clt = require "ngx.ssl.clienthello"
diff --git a/src/common/core/misc/confs/default-server-http/page.conf b/src/common/core/misc/confs/default-server-http/page.conf
deleted file mode 100644
index e810fa767d..0000000000
--- a/src/common/core/misc/confs/default-server-http/page.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-{% if IS_LOADING != "yes" and DISABLE_DEFAULT_SERVER == "no" +%}
-location / {
-	etag off;
-	add_header Last-Modified "";
-	server_tokens off;
-	default_type 'text/html';
-	root /usr/share/bunkerweb/core/misc/files;
-	content_by_lua_block {
-			local utils = require "bunkerweb.utils"
-			local rand = utils.rand
-			local subsystem = ngx.config.subsystem
-
-			local template
-			local render = nil
-			if subsystem == "http" then
-				template = require "resty.template"
-				render = template.render
-			end
-
-			local nonce_style = rand(16)
-
-			-- Override CSP header
-			ngx.header["Content-Security-Policy"] = "default-src 'none'; frame-ancestors 'none'; form-action 'self'; img-src 'self' data:; style-src 'self' 'nonce-"
-				.. nonce_style
-				.. "'; font-src 'self' data:; base-uri 'self'; require-trusted-types-for 'script'; block-all-mixed-content; upgrade-insecure-requests;"
-
-			-- Remove server header
-			ngx.header["Server"] = nil
-
-			-- Override HSTS header
-			if ngx.var.scheme == "https" then
-				ngx.header["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains; preload"
-			end
-
-			-- Override X-Content-Type-Options header
-			ngx.header["X-Content-Type-Options"] = "nosniff"
-
-			-- Override Referrer-Policy header
-			ngx.header["Referrer-Policy"] = "no-referrer"
-
-			-- Render template
-			render("default.html", {
-				nonce_style = nonce_style,
-			})
-	}
-}
-{% endif %}
diff --git a/src/common/core/modsecurity/confs/server-http/modsecurity-rules.conf.modsec b/src/common/core/modsecurity/confs/server-http/modsecurity-rules.conf.modsec
index 9913e93548..41b388dff4 100644
--- a/src/common/core/modsecurity/confs/server-http/modsecurity-rules.conf.modsec
+++ b/src/common/core/modsecurity/confs/server-http/modsecurity-rules.conf.modsec
@@ -1,149 +1,20 @@
 {% set os_path = import("os.path") %}
-# process rules with disruptive actions
-SecRuleEngine {{ MODSECURITY_SEC_RULE_ENGINE }}
-
-# allow body checks
-SecRequestBodyAccess On
-
-# enable XML parsing
-SecRule REQUEST_HEADERS:Content-Type "(?:application(?:/soap\+|/)|text/)xml" \
-     "id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML"
-
-# enable JSON parsing
-SecRule REQUEST_HEADERS:Content-Type "application/json" \
-     "id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON"
-
-# maximum data size
-{% if MAX_CLIENT_SIZE.endswith("k") or MAX_CLIENT_SIZE.endswith("K") %}
-SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 }}
-{% elif MAX_CLIENT_SIZE.endswith("m") or MAX_CLIENT_SIZE.endswith("M") %}
-SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 * 1024 }}
-{% elif MAX_CLIENT_SIZE.endswith("g") or MAX_CLIENT_SIZE.endswith("G") %}
-SecRequestBodyLimit {{ MAX_CLIENT_SIZE[:-1] | int * 1024 * 1024 * 1024 }}
-{% elif MAX_CLIENT_SIZE.isdigit() %}
-SecRequestBodyLimit {{ MAX_CLIENT_SIZE }}
-{% else %}
-SecRequestBodyLimit 13107200
-{% endif %}
-SecRequestBodyNoFilesLimit 131072
-
-# reject requests if bigger than max data size
-SecRequestBodyLimitAction Reject
-
-# reject if we can't process the body
-SecRule REQBODY_ERROR "!@eq 0" \
-"id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2"
-
-# be strict with multipart/form-data body
-SecRule MULTIPART_STRICT_ERROR "!@eq 0" \
-"id:'200003',phase:2,t:none,log,deny,status:400, \
-msg:'Multipart request body failed strict validation: \
-PE %{REQBODY_PROCESSOR_ERROR}, \
-BQ %{MULTIPART_BOUNDARY_QUOTED}, \
-BW %{MULTIPART_BOUNDARY_WHITESPACE}, \
-DB %{MULTIPART_DATA_BEFORE}, \
-DA %{MULTIPART_DATA_AFTER}, \
-HF %{MULTIPART_HEADER_FOLDING}, \
-LF %{MULTIPART_LF_LINE}, \
-SM %{MULTIPART_MISSING_SEMICOLON}, \
-IQ %{MULTIPART_INVALID_QUOTING}, \
-IP %{MULTIPART_INVALID_PART}, \
-IH %{MULTIPART_INVALID_HEADER_FOLDING}, \
-FL %{MULTIPART_FILE_LIMIT_EXCEEDED}'"
-SecRule MULTIPART_UNMATCHED_BOUNDARY "@eq 1" \
-    "id:'200004',phase:2,t:none,log,deny,msg:'Multipart parser detected a possible unmatched boundary.'"
-
-# enable response body checks
-SecResponseBodyAccess On
-SecResponseBodyMimeType text/plain text/html text/xml application/json
-SecResponseBodyLimit 524288
-SecResponseBodyLimitAction ProcessPartial
-
-# log usefull stuff
-SecAuditEngine {{ MODSECURITY_SEC_AUDIT_ENGINE }}
-SecAuditLogParts {{ MODSECURITY_SEC_AUDIT_LOG_PARTS }}
-SecAuditLogType Serial
-SecAuditLog /var/log/bunkerweb/modsec_audit.log
-
-# include OWASP CRS configurations
-{% if USE_MODSECURITY_CRS == "yes" %}
-{% if MODSECURITY_CRS_VERSION == "nightly" %}
-{% if os_path.isfile("/var/cache/bunkerweb/modsecurity/crs/crs-setup-nightly.conf") %}
-include /var/cache/bunkerweb/modsecurity/crs/crs-setup-nightly.conf
-{% else %}
-# fallback to the default CRS setup as the nightly one is not available
-include /usr/share/bunkerweb/core/modsecurity/files/crs-setup-v3.conf
-{% endif %}
-{% else %}
-include /usr/share/bunkerweb/core/modsecurity/files/crs-setup-v{{ MODSECURITY_CRS_VERSION }}.conf
-{% endif %}
 
 # custom CRS configurations before loading rules (e.g. exclusions)
-{% if is_custom_conf("/etc/bunkerweb/configs/modsec-crs") %}
-include /etc/bunkerweb/configs/modsec-crs/*.conf
-{% endif %}
 {% if MULTISITE == "yes" and is_custom_conf("/etc/bunkerweb/configs/modsec-crs/" + SERVER_NAME.split(" ")[0]) %}
 include /etc/bunkerweb/configs/modsec-crs/{{ SERVER_NAME.split(" ")[0] }}/*.conf
 {% endif %}
-{% if is_custom_conf("/etc/nginx/modsec-crs") %}
-include /etc/nginx/modsec-crs/*.conf
-{% endif %}
+
 {% if MULTISITE == "yes" and is_custom_conf("/etc/nginx/" + SERVER_NAME.split(" ")[0] + "/modsec-crs/") %}
 include /etc/nginx/{{ SERVER_NAME.split(" ")[0] }}/modsec-crs/*.conf
 {% endif %}
 
-# unset REASON env var
-SecAction "nolog,phase:1,setenv:REASON=none"
-
-# Auto update allowed methods
-SecAction \
- "id:900200,\
-  phase:1,\
-  nolog,\
-  pass,\
-  t:none,\
-  setvar:'tx.allowed_methods={{ ALLOWED_METHODS.replace("|", " ") }}'"
-
-# Check if client is whitelisted
-{% if USE_WHITELIST == "yes" +%}
-SecRule ENV:is_whitelisted "yes" "id:1000,phase:1,allow,nolog,ctl:ruleEngine=Off"
-{% endif +%}
-
-# include OWASP CRS rules
-{% if MODSECURITY_CRS_VERSION == "nightly" %}
-{% if os_path.exists("/var/cache/bunkerweb/modsecurity/crs/crs-nightly/rules") %}
-include /var/cache/bunkerweb/modsecurity/crs/crs-nightly/rules/*.conf
-{% else %}
-# fallback to the default CRS setup as the nightly one is not available
-include /usr/share/bunkerweb/core/modsecurity/files/coreruleset-v3/rules/*.conf
-{% endif %}
-{% else %}
-include /usr/share/bunkerweb/core/modsecurity/files/coreruleset-v{{ MODSECURITY_CRS_VERSION }}/rules/*.conf
-{% endif %}
-{% endif +%}
-
 # custom rules after loading the CRS
-{% if is_custom_conf("/etc/bunkerweb/configs/modsec") %}
-include /etc/bunkerweb/configs/modsec/*.conf
-{% endif %}
 {% if MULTISITE == "yes" and is_custom_conf("/etc/bunkerweb/configs/modsec/" + SERVER_NAME.split(" ")[0]) %}
 include /etc/bunkerweb/configs/modsec/{{ SERVER_NAME.split(" ")[0] }}/*.conf
 {% endif %}
-{% if is_custom_conf("/etc/nginx/modsec") %}
-include /etc/nginx/modsec/*.conf
-{% endif %}
+
 {% if MULTISITE == "yes" and is_custom_conf("/etc/nginx/" + SERVER_NAME.split(" ")[0] + "/modsec") %}
 include /etc/nginx/{{ SERVER_NAME.split(" ")[0] }}/modsec/*.conf
 {% endif %}
 
-
-{% if USE_MODSECURITY_CRS == "yes" %}
-
-# set REASON env var
-SecRuleUpdateActionById 949110 "t:none,deny,status:{{ DENY_HTTP_STATUS }},setenv:REASON=modsecurity"
-SecRuleUpdateActionById 959100 "t:none,deny,status:{{ DENY_HTTP_STATUS }},setenv:REASON=modsecurity"
-
-# let BW manage when method is not allowed (and save up some computing)
-SecRuleUpdateActionById 911100 "t:none,allow,nolog"
-
-{% endif %}
diff --git a/src/common/core/modsecurity/confs/server-http/modsecurity.conf b/src/common/core/modsecurity/confs/server-http/modsecurity.conf
index cf64d00d62..6b1c8c4a8f 100644
--- a/src/common/core/modsecurity/confs/server-http/modsecurity.conf
+++ b/src/common/core/modsecurity/confs/server-http/modsecurity.conf
@@ -1,4 +1,6 @@
 {% if USE_MODSECURITY == "yes" +%}
 modsecurity on;
 modsecurity_rules_file {{ NGINX_PREFIX }}server-http/modsecurity-rules.conf.modsec;
+{% else %}
+modsecurity off;
 {% endif %}
diff --git a/src/common/core/redis/plugin.json b/src/common/core/redis/plugin.json
index ee39a4370e..439c0749d4 100644
--- a/src/common/core/redis/plugin.json
+++ b/src/common/core/redis/plugin.json
@@ -20,7 +20,7 @@
       "help": "Redis server IP or hostname.",
       "id": "redis-host",
       "label": "Redis server",
-      "regex": "^((?!-)[a-zA-Z0-9\\-]{1,63}(.[a-zA-Z]{2,})+|(\\b25[0-5]|\\b2[0-4]\\d|\\b[01]?\\d\\d?)(\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)){3}|(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]Z{0,4}){0,4}%[0-9a-zA-Z]+|::(ffff(:0{1,4})?:)?((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d)|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1?\\d)?\\d)\\.){3}(25[0-5]|(2[0-4]|1?\\d)?\\d)))?$",
+      "regex": "^.*$",
       "type": "text"
     },
     "REDIS_PORT": {
diff --git a/src/common/core/reverseproxy/confs/server-http/reverse-proxy.conf b/src/common/core/reverseproxy/confs/server-http/reverse-proxy.conf
index eacc209017..df201cc35c 100644
--- a/src/common/core/reverseproxy/confs/server-http/reverse-proxy.conf
+++ b/src/common/core/reverseproxy/confs/server-http/reverse-proxy.conf
@@ -36,7 +36,7 @@ add_header X-Proxy-Cache $upstream_cache_status;
 		{% if k.startswith("REVERSE_PROXY_HOST") and v != "" +%}
 			{% set host = v %}
 			{% set url = all[k.replace("HOST", "URL")] if k.replace("HOST", "URL") in all else "/" %}
-			{% set ws = all[k.replace("HOST", "WS")] if k.replace("HOST", "WS") in all else "" %}
+			{% set ws = all["REVERSE_PROXY_WS"] if "REVERSE_PROXY_WS" in all else "" %}
 			{% set headers = all[k.replace("HOST", "HEADERS")] if k.replace("HOST", "HEADERS") in all else "" %}
 			{% set headers_client = all[k.replace("HOST", "HEADERS_CLIENT")] if k.replace("HOST", "HEADERS_CLIENT") in all else "" %}
 			{% set buffering = all[k.replace("HOST", "BUFFERING")] if k.replace("HOST", "BUFFERING") in all else "yes" %}
@@ -45,8 +45,8 @@ add_header X-Proxy-Cache $upstream_cache_status;
 			{% set auth_request_signin_url = all[k.replace("HOST", "AUTH_REQUEST_SIGNIN_URL")] if k.replace("HOST", "AUTH_REQUEST_SIGNIN_URL") in all else "" %}
 			{% set auth_request_sets = all[k.replace("HOST", "AUTH_REQUEST_SET")] if k.replace("HOST", "AUTH_REQUEST_SET") in all else "" %}
 			{% set connect_timeout = all[k.replace("HOST", "CONNECT_TIMEOUT")] if k.replace("HOST", "CONNECT_TIMEOUT") in all else "60s" %}
-			{% set read_timeout = all[k.replace("HOST", "READ_TIMEOUT")] if k.replace("HOST", "READ_TIMEOUT") in all else "60s" %}
-			{% set send_timeout = all[k.replace("HOST", "SEND_TIMEOUT")] if k.replace("HOST", "SEND_TIMEOUT") in all else "60s" %}
+			{% set read_timeout = all[k.replace("HOST", "READ_TIMEOUT")] if k.replace("HOST", "READ_TIMEOUT") in all else "600s" %}
+			{% set send_timeout = all[k.replace("HOST", "SEND_TIMEOUT")] if k.replace("HOST", "SEND_TIMEOUT") in all else "600s" %}
 			{% set includes = all[k.replace("HOST", "INCLUDES")] if k.replace("HOST", "INCLUDES") in all else "" %}
 location {{ url }} {% raw %}{{% endraw +%}
 	etag off;
diff --git a/src/common/core/ui/confs/default-server-http/ui.conf b/src/common/core/ui/confs/default-server-http/ui.conf
index f5547b9fa6..5589dbf4d5 100644
--- a/src/common/core/ui/confs/default-server-http/ui.conf
+++ b/src/common/core/ui/confs/default-server-http/ui.conf
@@ -4,9 +4,6 @@ access_by_lua_block {
     local scheme = ngx_var.scheme
     local http_host = ngx_var.http_host
     local request_uri = ngx_var.request_uri
-    if scheme == "http" and http_host ~= nil and http_host ~= "" and request_uri and request_uri ~= "" then
-        return ngx.redirect("https://" .. http_host .. request_uri, ngx.HTTP_MOVED_PERMANENTLY)
-    end
 }
 location /setup {
     etag off;
diff --git a/src/common/db/model.py b/src/common/db/model.py
index 03ed953a8b..3be5575a67 100644
--- a/src/common/db/model.py
+++ b/src/common/db/model.py
@@ -115,7 +115,7 @@ class Global_values(Base):
 class Services(Base):
     __tablename__ = "bw_services"
 
-    id = Column(String(64), primary_key=True)
+    id = Column(String(256), primary_key=True)
     method = Column(METHODS_ENUM, nullable=False)
     is_draft = Column(Boolean, default=False, nullable=False)
 
@@ -127,7 +127,7 @@ class Services(Base):
 class Services_settings(Base):
     __tablename__ = "bw_services_settings"
 
-    service_id = Column(String(64), ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"), primary_key=True)
+    service_id = Column(String(256), ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"), primary_key=True)
     setting_id = Column(String(256), ForeignKey("bw_settings.id", onupdate="cascade", ondelete="cascade"), primary_key=True)
     value = Column(TEXT, nullable=False)
     suffix = Column(Integer, primary_key=True, nullable=True, default=0)
@@ -172,7 +172,7 @@ class Jobs_cache(Base):
 
     id = Column(Integer, Identity(start=1, increment=1), primary_key=True)
     job_name = Column(String(128), ForeignKey("bw_jobs.name", onupdate="cascade", ondelete="cascade"), nullable=False)
-    service_id = Column(String(64), ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"), nullable=True)
+    service_id = Column(String(256), ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"), nullable=True)
     file_name = Column(String(256), nullable=False)
     data = Column(LargeBinary(length=(2**32) - 1), nullable=True)
     last_update = Column(DateTime, nullable=True)
@@ -187,7 +187,7 @@ class Custom_configs(Base):
     __table_args__ = (UniqueConstraint("service_id", "type", "name"),)
 
     id = Column(Integer, Identity(start=1, increment=1), primary_key=True)
-    service_id = Column(String(64), ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"), nullable=True)
+    service_id = Column(String(256), ForeignKey("bw_services.id", onupdate="cascade", ondelete="cascade"), nullable=True)
     type = Column(CUSTOM_CONFIGS_TYPES_ENUM, nullable=False)
     name = Column(String(256), nullable=False)
     data = Column(LargeBinary(length=(2**32) - 1), nullable=False)
diff --git a/src/common/gen/Templator.py b/src/common/gen/Templator.py
index 5fbca82806..67193193fb 100644
--- a/src/common/gen/Templator.py
+++ b/src/common/gen/Templator.py
@@ -60,7 +60,7 @@ def __write_config(self, subpath: Optional[str] = None, config: Optional[Dict[st
 
     def __render_global(self):
         self.__write_config()
-        templates = self.__find_templates(["global", "http", "stream", "default-server-http"])
+        templates = self.__find_templates(["global", "http", "stream", "default-server-http", "http-modsec-crs", "http-modsecurity"])
         for template in templates:
             self.__render_template(template)
 
diff --git a/src/common/helpers/readiness.sh b/src/common/helpers/readiness.sh
new file mode 100755
index 0000000000..e64816ebd2
--- /dev/null
+++ b/src/common/helpers/readiness.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
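+# readiness probe: nginx must have a PID file, the local healthz endpoint must answer "ok" and the temporary IS_LOADING config must be gone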
+if [ ! -f /var/run/bunkerweb/nginx.pid ] ; then
+	exit 1
+fi
+
+check="$(curl -s -H "Host: healthcheck.bunkerweb.io" http://127.0.0.1:6000/healthz 2>&1)"
+# shellcheck disable=SC2181
+if [ $? -ne 0 ] || [ "$check" != "ok" ] ; then
+	exit 1
+fi
+
+# check IS_LOADING
+VAR_FILE=/etc/nginx/variables.env
+if grep -q "IS_LOADING=yes" "$VAR_FILE"; then
+    echo "pod is loading, waiting..."
+		exit 1
+fi
+exit 0
diff --git a/src/common/settings.json b/src/common/settings.json
index 83018e0c98..f470edffbe 100644
--- a/src/common/settings.json
+++ b/src/common/settings.json
@@ -28,7 +28,7 @@
   },
   "HTTPS_PORT": {
     "context": "global",
-    "default": "8443",
+    "default": "8843",
     "help": "HTTPS port number which bunkerweb binds to.",
     "id": "https-port",
     "label": "HTTPS port",
diff --git a/src/scheduler/entrypoint.sh b/src/scheduler/entrypoint.sh
index 6417f59de0..ff7a3f03e6 100755
--- a/src/scheduler/entrypoint.sh
+++ b/src/scheduler/entrypoint.sh
@@ -47,7 +47,7 @@ fi
 
 # execute jobs
 log "ENTRYPOINT" "ℹī¸ " "Executing scheduler ..."
-/usr/share/bunkerweb/scheduler/main.py &
+/usr/share/bunkerweb/scheduler/run.py &
 pid="$!"
 wait "$pid"
 while [ -f /var/run/bunkerweb/scheduler.pid ] ; do
diff --git a/src/scheduler/run.py b/src/scheduler/run.py
new file mode 100755
index 0000000000..8777b10de0
--- /dev/null
+++ b/src/scheduler/run.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
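+# Wrapper entrypoint: runs the scheduler behind a Kubernetes leader election so only one replica is active at a time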
+import subprocess
+from argparse import ArgumentParser
+from os.path import join, sep
+from sys import path as sys_path
+
+for deps_path in [join(sep, "usr", "share", "bunkerweb", *paths) for paths in (("deps", "python"), ("utils",), ("api",), ("db",))]:
+    if deps_path not in sys_path:
+        sys_path.append(deps_path)
+
+import uuid
+from kubernetes import config
+from kubernetes.leaderelection import leaderelection
+from kubernetes.leaderelection.resourcelock.configmaplock import ConfigMapLock
+from kubernetes.leaderelection import electionconfig
+from os import _exit, getenv
+
+namespace = getenv("NAMESPACE", "default")
+pod_name = getenv("POD_NAME", f"scheduler-{uuid.uuid4()}")
+variables = None
+
+
+def run_on_kubernetes_ha_mode():
+    config.load_incluster_config()
+    lock_name = "scheduler-election"
+    election_config = electionconfig.Config(
+        ConfigMapLock(lock_name, namespace, pod_name),
+        lease_duration=17,
+        renew_deadline=15,
+        retry_period=5,
+        onstarted_leading=kubernetes_start,
+        onstopped_leading=onstopped_leading
+    )
+    print(f"Pod {pod_name} is competing for lock {lock_name} in namespace {namespace}")
+    # Enter leader election
+    leaderelection.LeaderElection(election_config).run()
+
+
+def kubernetes_start():
+    print(f'{pod_name} is leader')
+    if variables:
+        subprocess.call(f"/usr/share/bunkerweb/scheduler/main.py --variables {variables} &", shell =True)
+    else:
+        subprocess.call("/usr/share/bunkerweb/scheduler/main.py &", shell =True)
+
+def onstopped_leading():
+    print(f"{pod_name} stopped leading, exiting with code 0")
+    _exit(0)
+
+
+if __name__ == "__main__":
+    # Parse arguments
+    parser = ArgumentParser(description="Job scheduler for BunkerWeb")
+    parser.add_argument("--variables", type=str, help="path to the file containing environment variables")
+    args = parser.parse_args()
+    variables = args.variables
+    run_on_kubernetes_ha_mode()
\ No newline at end of file
diff --git a/src/ui/main.py b/src/ui/main.py
index 1b9752c667..b3f201aa95 100755
--- a/src/ui/main.py
+++ b/src/ui/main.py
@@ -165,8 +165,8 @@ def handle_stop(signum, frame):
     app.jinja_env.globals.update(check_settings=check_settings)
 
     # CSRF protection
-    csrf = CSRFProtect()
-    csrf.init_app(app)
+    #csrf = CSRFProtect()
+    #csrf.init_app(app)
 
 LOG_RX = re_compile(r"^(?P<date>\d+/\d+/\d+\s\d+:\d+:\d+)\s\[(?P<level>[a-z]+)\]\s\d+#\d+:\s(?P<message>[^\n]+)$")
 REVERSE_PROXY_PATH = re_compile(r"^(?P<host>https?://.{1,255}(:((6553[0-5])|(655[0-2]\d)|(65[0-4]\d{2})|(6[0-4]\d{3})|([1-5]\d{4})|([0-5]{0,5})|(\d{1,4})))?)$")