diff --git a/django_logging/tests/commands/test_generate_pretty_json.py b/django_logging/tests/commands/test_generate_pretty_json.py
new file mode 100644
index 0000000..6e9d9b1
--- /dev/null
+++ b/django_logging/tests/commands/test_generate_pretty_json.py
@@ -0,0 +1,111 @@
+import json
+import os
+import sys
+from io import StringIO
+from pathlib import Path
+from typing import Any
+
+import pytest
+from django.core.management import call_command
+from django.test import override_settings
+
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.commands,
+    pytest.mark.commands_generate_pretty_json,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestJsonReformatCommand:
+    """
+    Test suite for the Django management command that reformats JSON files in a log directory.
+
+    This test suite verifies the functionality of the command, which searches for `.json` files,
+    parses multiple JSON objects, and saves them as a valid JSON array in a 'pretty' subdirectory.
+    """
+
+    @override_settings(DJANGO_LOGGING={"LOG_DIR": "/tmp/test_logs"})
+    def test_command_successful_processing(
+        self, temp_json_log_directory: str, settings: Any
+    ) -> None:
+        """
+        Test the successful processing and pretty-printing of JSON files.
+
+        This test verifies that the command:
+        1. Processes JSON files in the 'json' directory.
+        2. Writes pretty JSON arrays into the 'pretty' subdirectory.
+        3. Logs the successful processing of files.
+
+        Args:
+            temp_json_log_directory (str): Path to the temporary log directory.
+            settings (django.conf.Settings): Django settings.
+        """
+        settings.DJANGO_LOGGING["LOG_DIR"] = temp_json_log_directory
+
+        out = StringIO()
+        call_command("generate_pretty_json", stdout=out)
+
+        # Check output
+        assert "Processing file" in out.getvalue()
+        assert (
+            "reformatted and generated new pretty file successfully" in out.getvalue()
+        )
+
+        # Verify that the formatted JSON file exists in the pretty directory
+        pretty_dir = os.path.join(
+            settings.DJANGO_LOGGING.get("LOG_DIR"), "json", "pretty"
+        )
+        formatted_file = os.path.join(pretty_dir, "formatted_test.json")
+        assert os.path.exists(formatted_file)
+
+        # Load and verify the content of the generated pretty file
+        with open(formatted_file) as f:
+            data = json.load(f)
+            assert isinstance(data, list)
+            assert len(data) == 2
+            assert data[0]["key"] == "value"
+
+    @override_settings(DJANGO_LOGGING={"LOG_DIR": "/non_existent_dir"})
+    def test_command_file_not_found_error(self, settings: Any) -> None:
+        """
+        Test handling of FileNotFoundError when the log directory does not exist.
+
+        This test checks that the command logs an error when it fails to find the specified log directory.
+
+        Args:
+            settings (django.conf.Settings): Django settings.
+        """
+        out = StringIO()
+        call_command("generate_pretty_json", stdout=out)
+
+        # Check if the command logs the directory not found error
+        assert "does not exist." in out.getvalue()
+
+    def test_command_invalid_json(self, temp_json_log_directory: str, settings: Any) -> None:
+        """
+        Test the command's handling of invalid JSON files.
+
+        This test verifies that the command logs a JSONDecodeError when it encounters invalid JSON content.
+
+        Args:
+            temp_json_log_directory (str): Path to the temporary log directory.
+            settings (django.conf.Settings): Django settings.
+        """
+        settings.DJANGO_LOGGING["LOG_DIR"] = temp_json_log_directory
+
+        # Create a faulty JSON file with invalid syntax.
+        faulty_json_file = Path(temp_json_log_directory) / "json" / "faulty.json"
+        faulty_json_file.write_text(
+            """
+            {"key": "value", \n "key2" }
+            
+            """
+        )  # Invalid JSON
+
+        out = StringIO()
+        call_command("generate_pretty_json", stdout=out)
+
+        assert "faulty.json" in out.getvalue()
+        assert 'Incomplete JSON object: {"key": "value","key2" }' in out.getvalue()
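Note: the suite above assumes the `generate_pretty_json` command can turn a file of concatenated JSON objects into a single array. A minimal sketch of that parsing step, using `json.JSONDecoder.raw_decode` (the helper name is hypothetical, not the command's actual implementation):

    import json
    from typing import Any, List

    def parse_concatenated_json(text: str) -> List[Any]:
        """Split a string of back-to-back JSON objects into a list."""
        decoder = json.JSONDecoder()
        objects: List[Any] = []
        index = 0
        while index < len(text):
            # Skip whitespace between objects.
            if text[index].isspace():
                index += 1
                continue
            obj, index = decoder.raw_decode(text, index)
            objects.append(obj)
        return objects

    # parse_concatenated_json('{"key": "value"}\n{"key": "value2"}')
    # -> [{"key": "value"}, {"key": "value2"}]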
diff --git a/django_logging/tests/commands/test_generate_pretty_xml.py b/django_logging/tests/commands/test_generate_pretty_xml.py
new file mode 100644
index 0000000..d8d85aa
--- /dev/null
+++ b/django_logging/tests/commands/test_generate_pretty_xml.py
@@ -0,0 +1,83 @@
+import os
+import sys
+from io import StringIO
+from typing import Any
+
+import pytest
+from django.core.management import call_command
+from django.test import override_settings
+
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.commands,
+    pytest.mark.commands_generate_pretty_xml,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestGeneratePrettyXMLCommand:
+    """
+    Test suite for the `generate_pretty_xml` management command.
+
+    This test suite verifies the functionality of the command, which searches for `.xml` files,
+    reformats them by wrapping their content in a <logs> element, and saves them in a 'pretty' subdirectory.
+    """
+
+    @override_settings(DJANGO_LOGGING={"LOG_DIR": "/tmp/test_logs"})
+    def test_command_successful_processing(
+        self, temp_xml_log_directory: str, settings: Any
+    ) -> None:
+        """
+        Test the successful processing and reformatting of XML files.
+
+        This test verifies that the command:
+        1. Processes XML files in the 'xml' directory.
+        2. Writes reformatted XML files into the 'pretty' subdirectory.
+        3. Logs the successful processing of files.
+
+        Args:
+        ----
+            temp_xml_log_directory (str): Path to the temporary log directory.
+            settings (django.conf.Settings): Django settings.
+        """
+        # Update the settings to point to the temp log directory
+        settings.DJANGO_LOGGING["LOG_DIR"] = temp_xml_log_directory
+
+        out = StringIO()
+        call_command("generate_pretty_xml", stdout=out)
+
+        # Check command output for success message
+        assert "Processing file" in out.getvalue()
+        assert "File test.xml reformatted successfully." in out.getvalue()
+
+        # Verify that the reformatted XML file exists in the pretty directory
+        pretty_dir = os.path.join(
+            settings.DJANGO_LOGGING["LOG_DIR"], "xml", "pretty"
+        )
+        formatted_file = os.path.join(pretty_dir, "formatted_test.xml")
+        assert os.path.exists(formatted_file), "Reformatted file was not created."
+
+        # Check the content of the generated pretty XML file
+        with open(formatted_file) as f:
+            content = f.read()
+            assert "<logs>" in content
+            assert "<entry>Test Entry</entry>" in content
+            assert "</logs>" in content
+
+    @override_settings(DJANGO_LOGGING={"LOG_DIR": "/non_existent_dir"})
+    def test_command_directory_not_found(self, settings: Any) -> None:
+        """
+        Test that the command handles the case when the XML directory is missing.
+
+        This test checks that the command outputs an appropriate error message when the directory does not exist.
+
+        Args:
+        ----
+            settings (django.conf.Settings): Django settings.
+        """
+        out = StringIO()
+        call_command("generate_pretty_xml", stdout=out)
+
+        # Verify error output
+        assert "does not exist." in out.getvalue()
diff --git a/django_logging/tests/commands/test_logs_size_audit.py b/django_logging/tests/commands/test_logs_size_audit.py
new file mode 100644
index 0000000..92d554a
--- /dev/null
+++ b/django_logging/tests/commands/test_logs_size_audit.py
@@ -0,0 +1,98 @@
+import sys
+from io import StringIO
+from unittest.mock import MagicMock, patch
+
+import pytest
+from django.core.management import call_command
+
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.commands,
+    pytest.mark.commands_logs_size_audit,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestCheckLogSizeCommand:
+    """
+    Test suite for the `check_log_size` management command.
+    """
+
+    @patch("os.path.exists", return_value=True)
+    @patch("os.walk")
+    def test_command_log_directory_size_under_limit(
+        self, mock_os_walk: MagicMock, temp_log_directory: str
+    ) -> None:
+        """
+        Test that the command correctly handles the case when the log directory size is under the limit.
+
+        This test verifies that the command calculates the log directory size correctly and does not send
+        an email when the size is below the limit.
+
+        Args:
+            mock_os_walk (MagicMock): Mock for `os.walk`.
+            temp_log_directory (str): Temporary log directory fixture.
+        """
+        # Mock the os.walk to return an empty directory
+        mock_os_walk.return_value = [(temp_log_directory, [], [])]
+
+        # Execute the command and capture the output
+        out = StringIO()
+        with patch("django.conf.settings.DJANGO_LOGGING", {"LOG_DIR_SIZE_LIMIT": 100}):
+            call_command("logs_size_audit", stdout=out)
+
+        assert "Log directory size is under the limit" in out.getvalue()
+
+    @patch("os.path.exists", return_value=True)
+    @patch("os.walk")
+    @patch("django_logging.management.commands.logs_size_audit.send_email_async")
+    def test_command_log_directory_size_exceeds_limit(
+        self,
+        mock_send_email: MagicMock,
+        mock_os_walk: MagicMock,
+        temp_log_directory: str,
+    ) -> None:
+        """
+        Test that the command sends a warning email when the log directory size exceeds the limit.
+
+        This test verifies that the command calculates the log directory size correctly and sends
+        an email notification when the size exceeds the limit.
+
+        Args:
+        ----
+            mock_send_email (MagicMock): Mock for the `send_email_async` function.
+            mock_os_walk (MagicMock): Mock for `os.walk`.
+            temp_log_directory (str): Temporary log directory fixture.
+        """
+        # Mock the os.walk to simulate a large directory
+        mock_os_walk.return_value = [
+            (temp_log_directory, [], ["log1.txt", "log2.txt"]),
+        ]
+        # Mock the file sizes to exceed the limit
+        with patch("os.path.getsize", side_effect=[60 * 1024 * 1024, 50 * 1024 * 1024]):
+            out = StringIO()
+            with patch("django.conf.settings.ADMIN_EMAIL", "admin@example.com"):
+                with patch("django.conf.settings.DJANGO_LOGGING", {"LOG_DIR_SIZE_LIMIT": 100}):
+                    call_command("logs_size_audit", stdout=out)
+
+        # Verify that the warning email was sent
+        mock_send_email.assert_called_once()
+        assert "Warning email sent successfully" in out.getvalue()
+
+    @patch("os.path.exists", return_value=False)
+    def test_command_log_directory_not_found(self, temp_log_directory: str) -> None:
+        """
+        Test that the command handles the case where the log directory does not exist.
+
+        This test verifies that the command logs an error message and exits gracefully
+        when the log directory is missing.
+
+        Args:
+        ----
+            temp_log_directory (str): Temporary log directory fixture.
+        """
+        out = StringIO()
+        call_command("logs_size_audit", stdout=out)
+
+        assert "Log directory not found" in out.getvalue()
diff --git a/django_logging/tests/conftest.py b/django_logging/tests/conftest.py
index 2d2c7ec..b0fc2ce 100644
--- a/django_logging/tests/conftest.py
+++ b/django_logging/tests/conftest.py
@@ -5,7 +5,10 @@
     email_handler,
     email_mock_settings,
     error_log_record,
+    error_with_exc_log_record,
+    flat_formatter,
     get_response,
+    json_formatter,
     log_config,
     log_manager,
     magic_mock_logger,
@@ -17,6 +20,10 @@
     request_factory,
     request_middleware,
     reset_settings,
+    temp_json_log_directory,
+    temp_log_directory,
+    temp_xml_log_directory,
+    xml_formatter,
 )
 from django_logging.tests.setup import configure_django_settings
 
diff --git a/django_logging/tests/contextvar/__init__.py b/django_logging/tests/contextvar/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/django_logging/tests/contextvar/test_contextvar_manager.py b/django_logging/tests/contextvar/test_contextvar_manager.py
new file mode 100644
index 0000000..21012fc
--- /dev/null
+++ b/django_logging/tests/contextvar/test_contextvar_manager.py
@@ -0,0 +1,106 @@
+import sys
+
+import pytest
+
+from django_logging.contextvar.contextvar_manager import ContextVarManager
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.contextvar,
+    pytest.mark.contextvar_manager,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestContextVarManager:
+    """Tests for the ContextVarManager class."""
+
+    def setup_method(self) -> None:
+        """Set up a new ContextVarManager instance before each test."""
+        self.manager = ContextVarManager()
+
+    def test_bind_and_get_contextvars(self) -> None:
+        """
+        Test that variables can be bound and retrieved.
+        """
+        self.manager.bind(user_id=42, request_id="abc123")
+
+        context_vars = self.manager.get_contextvars()
+        assert context_vars["user_id"] == 42
+        assert context_vars["request_id"] == "abc123"
+        self.manager.clear()
+
+    def test_batch_bind_and_reset(self) -> None:
+        """
+        Test batch binding context variables and resetting them using tokens.
+        """
+        tokens = self.manager.batch_bind(user_id=42, request_id="abc123")
+
+        context_vars = self.manager.get_contextvars()
+        assert context_vars["user_id"] == 42
+        assert context_vars["request_id"] == "abc123"
+
+        self.manager.reset(tokens)
+        context_vars = self.manager.get_contextvars()
+        assert "user_id" not in context_vars
+        assert "request_id" not in context_vars
+
+    def test_unbind(self) -> None:
+        """
+        Test unbinding a context variable.
+        """
+        self.manager.bind(user_id=42)
+        self.manager.unbind("user_id")
+
+        context_vars = self.manager.get_contextvars()
+        assert "user_id" not in context_vars
+
+    def test_clear(self) -> None:
+        """
+        Test clearing all context variables.
+        """
+        self.manager.bind(user_id=42, request_id="abc123")
+        self.manager.clear()
+
+        context_vars = self.manager.get_contextvars()
+        assert "user_id" not in context_vars
+        assert "request_id" not in context_vars
+
+    def test_merge_contexts(self) -> None:
+        """
+        Test merging context variables with priority given to bound context.
+        """
+        local_context = {"user_id": 42, "request_id": "abc123"}
+        bound_context = {"user_id": 99, "role": "admin"}
+
+        merged_context = self.manager.merge_contexts(bound_context, local_context)
+
+        assert merged_context["user_id"] == 99  # bound context should override
+        assert merged_context["request_id"] == "abc123"
+        assert merged_context["role"] == "admin"
+        self.manager.clear()
+
+    def test_get_merged_context(self) -> None:
+        """
+        Test getting the merged context from both logger-bound and local context variables.
+        """
+        self.manager.bind(user_id=42, request_id="abc123")
+        bound_logger_context = {"user_id": 99, "role": "admin"}
+
+        merged_context = self.manager.get_merged_context(bound_logger_context)
+
+        assert merged_context["user_id"] == 99  # bound context should override
+        assert merged_context["request_id"] == "abc123"
+        assert merged_context["role"] == "admin"
+        self.manager.clear()
+
+    def test_scoped_context(self) -> None:
+        """
+        Test using the context manager to temporarily bind and reset context variables.
+        """
+        with self.manager.scoped_context(user_id=42):
+            context_vars = self.manager.get_contextvars()
+            assert context_vars["user_id"] == 42
+
+        context_vars = self.manager.get_contextvars()
+        assert "user_id" not in context_vars
diff --git a/django_logging/tests/decorators/test_execution_tracking.py b/django_logging/tests/decorators/test_execution_tracking.py
index 316f10d..1da3fb3 100644
--- a/django_logging/tests/decorators/test_execution_tracking.py
+++ b/django_logging/tests/decorators/test_execution_tracking.py
@@ -1,13 +1,14 @@
+import logging
 import sys
+import time
 
 import pytest
-import logging
-import time
 from django.conf import settings
 from django.db import connection
 
 from django_logging.decorators import execution_tracker
 from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+from django_logging.utils.time import format_elapsed_time
 
 pytestmark = [
     pytest.mark.decorators,
@@ -79,6 +80,10 @@ def test_execution_tracker_execution_time(self) -> None:
         result = self.sample_function(0.2)
         elapsed_time = time.time() - start_time
 
+        # check the human-readable output of the time formatting utility
+        formatted_time = format_elapsed_time(120.0)
+        assert "2 minute(s)" in formatted_time
+
         assert (
             result == "Function executed"
         ), "The function did not return the expected message."
@@ -98,7 +103,7 @@ def test_execution_tracker_logs_queries(
         caplog.clear()
 
         @execution_tracker(logging_level=logging.INFO, log_queries=True)
-        def sample_db_function():
+        def sample_db_function() -> str:
             with connection.cursor() as cursor:
                 cursor.execute("SELECT 1")
             return "DB function executed"
@@ -128,7 +133,7 @@ def test_execution_tracker_query_threshold_warning(
             query_threshold=1,
             query_exceed_warning=True,
         )
-        def sample_db_threshold_function():
+        def sample_db_threshold_function() -> str:
             with connection.cursor() as cursor:
                 cursor.execute("SELECT 1")
                 cursor.execute("SELECT 2")
@@ -156,7 +161,7 @@ def test_execution_tracker_logs_queries_debug_false(
         caplog.clear()
 
         @execution_tracker(logging_level=logging.INFO, log_queries=True)
-        def sample_db_function():
+        def sample_db_function() -> str:
             with connection.cursor() as cursor:
                 cursor.execute("SELECT 1")
             return "DB function executed"
@@ -181,7 +186,7 @@ def test_execution_tracker_error_handling(
         """
 
         @execution_tracker(logging_level=logging.ERROR)
-        def sample_error_function():
+        def sample_error_function() -> None:
             raise ValueError("Sample error")
 
         with pytest.raises(ValueError, match="Sample error"):
diff --git a/django_logging/tests/fixtures/__init__.py b/django_logging/tests/fixtures/__init__.py
index 5b8601d..72e664e 100644
--- a/django_logging/tests/fixtures/__init__.py
+++ b/django_logging/tests/fixtures/__init__.py
@@ -1,10 +1,19 @@
-from .colored_formatter_fixture import colored_formatter
+from .commands_fixture import (
+    temp_json_log_directory,
+    temp_log_directory,
+    temp_xml_log_directory,
+)
 from .conf_fixture import log_config, log_manager
 from .email_handler_fixture import email_handler
 from .email_notifier_fixture import email_mock_settings, mock_smtp, notifier_mock_logger
 from .email_settings_fixture import mock_email_settings
+from .formatters import colored_formatter, flat_formatter, json_formatter, xml_formatter
 from .log_and_notify_fixture import admin_email_mock_settings, magic_mock_logger
-from .log_record_fixture import debug_log_record, error_log_record
+from .log_record_fixture import (
+    debug_log_record,
+    error_log_record,
+    error_with_exc_log_record,
+)
 from .logger_fixture import mock_logger
 from .request_middleware_fixture import (
     get_response,
diff --git a/django_logging/tests/fixtures/commands_fixture.py b/django_logging/tests/fixtures/commands_fixture.py
new file mode 100644
index 0000000..0c70e54
--- /dev/null
+++ b/django_logging/tests/fixtures/commands_fixture.py
@@ -0,0 +1,60 @@
+import pytest
+from _pytest._py.path import LocalPath
+
+
+@pytest.fixture
+def temp_json_log_directory(tmpdir: LocalPath) -> str:
+    """
+    Fixture to create a temporary log directory with sample JSON files for testing.
+
+    Args:
+        tmpdir (LocalPath): Temporary directory fixture provided by pytest.
+
+    Returns:
+        str: The path to the temporary log directory.
+    """
+    json_dir = tmpdir.mkdir("json")
+
+    # Create a valid JSON file with multiple JSON objects.
+    json_file = json_dir.join("test.json")
+    json_file.write('{"key": "value"}\n{"key": "value2"}')
+
+    return str(tmpdir)
+
+
+@pytest.fixture
+def temp_log_directory(tmpdir: LocalPath) -> str:
+    """
+    Fixture to create a temporary log directory for testing.
+
+    Args:
+        tmpdir (LocalPath): Temporary directory fixture provided by pytest.
+
+    Returns:
+        str: Path to the temporary log directory.
+    """
+    log_dir = tmpdir.mkdir("logs")
+    return str(log_dir)
+
+
+@pytest.fixture
+def temp_xml_log_directory(tmpdir: LocalPath) -> str:
+    """
+    Fixture to create a temporary log directory with sample XML files for testing.
+
+    Args:
+    ----
+        tmpdir (LocalPath): Temporary directory fixture provided by pytest.
+
+    Returns:
+    -------
+        str: The path to the temporary log directory.
+    """
+    # Create the directory structure for logs/xml and logs/pretty
+    xml_dir = tmpdir.mkdir("xml")
+
+    # Create a valid XML file for testing
+    xml_file = xml_dir.join("test.xml")
+    xml_file.write("<log><entry>Test Entry</entry></log>")
+
+    return str(tmpdir)
diff --git a/django_logging/tests/fixtures/conf_fixture.py b/django_logging/tests/fixtures/conf_fixture.py
index 976edeb..8b87815 100644
--- a/django_logging/tests/fixtures/conf_fixture.py
+++ b/django_logging/tests/fixtures/conf_fixture.py
@@ -20,6 +20,8 @@ def log_config() -> LogConfig:
         log_levels=["INFO", "WARNING", "ERROR"],
         log_dir="/tmp/logs",
         log_file_formats={"INFO": 1, "WARNING": None, "ERROR": "%(message)s"},  # type: ignore
+        log_file_format_types={"INFO": "NORMAL"},
+        extra_log_files={"INFO": False},
         console_level="INFO",
         console_format=1,
         colorize_console=False,
diff --git a/django_logging/tests/fixtures/formatters.py b/django_logging/tests/fixtures/formatters.py
index f8b1ca9..bf8b39f 100644
--- a/django_logging/tests/fixtures/formatters.py
+++ b/django_logging/tests/fixtures/formatters.py
@@ -1,6 +1,11 @@
 import pytest
 
-from django_logging.formatters import ColoredFormatter
+from django_logging.formatters import (
+    ColoredFormatter,
+    FLATFormatter,
+    JSONFormatter,
+    XMLFormatter,
+)
 
 
 @pytest.fixture
@@ -14,3 +19,39 @@ def colored_formatter() -> ColoredFormatter:
         An instance of `ColoredFormatter` with a predefined format.
     """
     return ColoredFormatter(fmt="%(levelname)s: %(message)s")
+
+
+@pytest.fixture
+def json_formatter() -> JSONFormatter:
+    """
+    Fixture to provide an instance of `JSONFormatter`.
+
+    Returns:
+    -------
+    JSONFormatter: An instance of the `JSONFormatter` with a predefined format.
+    """
+    return JSONFormatter(fmt="%(levelname)s: %(message)s")
+
+
+@pytest.fixture
+def xml_formatter() -> XMLFormatter:
+    """
+    Fixture to provide an instance of `XMLFormatter`.
+
+    Returns:
+    -------
+    XMLFormatter: An instance of `XMLFormatter` with a predefined format.
+    """
+    return XMLFormatter(fmt="%(levelname)s: %(message)s")
+
+
+@pytest.fixture
+def flat_formatter() -> FLATFormatter:
+    """
+    Fixture to provide an instance of `FLATFormatter`.
+
+    Returns:
+    -------
+    FLATFormatter: An instance of `FLATFormatter` with a predefined format.
+    """
+    return FLATFormatter(fmt="%(levelname)s: %(message)s")
diff --git a/django_logging/tests/fixtures/log_record_fixture.py b/django_logging/tests/fixtures/log_record_fixture.py
index cab0438..6eaa86c 100644
--- a/django_logging/tests/fixtures/log_record_fixture.py
+++ b/django_logging/tests/fixtures/log_record_fixture.py
@@ -1,3 +1,4 @@
+import sys
 from logging import DEBUG, ERROR, LogRecord
 
 import pytest
@@ -41,3 +42,25 @@
         args=(),
         exc_info=None,
     )
+
+
+@pytest.fixture
+def error_with_exc_log_record() -> LogRecord:
+    """
+    Fixture to create a dummy ERROR-level log record carrying exception info for testing.
+
+    Returns:
+    -------
+        logging.LogRecord: A dummy log record with predefined attributes.
+    """
+    return LogRecord(
+        name="test",
+        level=ERROR,
+        pathname=__file__,
+        lineno=10,
+        msg="Test message",
+        args=(),
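+        # sys.exc_info() is (None, None, None) outside an except block, so
+        # formatters must tolerate an exc_info tuple with no active exception.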
+        exc_info=sys.exc_info(),
+    )
diff --git a/django_logging/tests/fixtures/request_middleware_fixture.py b/django_logging/tests/fixtures/request_middleware_fixture.py
index a291a94..cbaae2a 100644
--- a/django_logging/tests/fixtures/request_middleware_fixture.py
+++ b/django_logging/tests/fixtures/request_middleware_fixture.py
@@ -38,18 +38,16 @@ def _get_response(request: RequestFactory) -> HttpResponse:
 
 
 @pytest.fixture
-def request_middleware(get_response: Callable) -> RequestLogMiddleware:
+def request_middleware() -> RequestLogMiddleware:
     """
     Fixture to create an instance of RequestLogMiddleware.
 
-    Args:
-    ----
-    get_response : function
-        A function that returns an HttpResponse for a given request.
-
     Returns:
     -------
     RequestLogMiddleware
-        An instance of RequestLogMiddleware with the provided get_response function.
+        An instance of RequestLogMiddleware with the sample HttpResponse.
     """
-    return RequestLogMiddleware(get_response)
+    middleware = RequestLogMiddleware(lambda request: HttpResponse("OK"))
+    middleware.log_sql = True
+
+    return middleware
diff --git a/django_logging/tests/fixtures/settings_fixture.py b/django_logging/tests/fixtures/settings_fixture.py
index a26642c..387705f 100644
--- a/django_logging/tests/fixtures/settings_fixture.py
+++ b/django_logging/tests/fixtures/settings_fixture.py
@@ -26,6 +26,14 @@ def mock_settings() -> Generator[Dict, None, None]:
             "LOG_FILE_FORMATS": {
                 "DEBUG": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
             },
+            "LOG_FILE_FORMAT_TYPES": {
+                "DEBUG": "JSON",
+                "INFO": "XML",
+            },
+            "EXTRA_LOG_FILES": {
+                "DEBUG": False,
+                "INFO": True,
+            },
             "LOG_CONSOLE_LEVEL": "WARNING",
             "LOG_CONSOLE_FORMAT": "%(levelname)s - %(message)s",
             "LOG_CONSOLE_COLORIZE": True,
diff --git a/django_logging/tests/formatters/test_base_formatter.py b/django_logging/tests/formatters/test_base_formatter.py
new file mode 100644
index 0000000..440cd07
--- /dev/null
+++ b/django_logging/tests/formatters/test_base_formatter.py
@@ -0,0 +1,91 @@
+import sys
+from logging import LogRecord
+
+import pytest
+
+from django_logging.formatters.base import BaseStructuredFormatter
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.formatters,
+    pytest.mark.base_formatter,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestBaseStructuredFormatter:
+
+    def test_extract_specifiers(self) -> None:
+        """
+        Test that the `_extract_specifiers` method correctly extracts format specifiers from the provided format string.
+
+        Asserts:
+        -------
+            - The extracted specifiers match the expected list.
+        """
+        fmt_string = "%(levelname)s | %(asctime)s | %(message)s | %(custom_field)s"
+        formatter = BaseStructuredFormatter(fmt=fmt_string)
+
+        expected_specifiers = ["levelname", "asctime", "message", "custom_field"]
+        assert formatter.specifiers == expected_specifiers, (
+            f"Expected specifiers {expected_specifiers}, "
+            f"but got {formatter.specifiers}"
+        )
+
+    def test_extract_specifiers_empty_format(self) -> None:
+        """
+        Test that `_extract_specifiers` returns an empty list when no format string is provided.
+
+        Asserts:
+        -------
+            - The specifiers list is empty.
+        """
+        formatter = BaseStructuredFormatter(fmt=None)
+        assert (
+            formatter.specifiers == []
+        ), "Specifiers list should be empty when no format string is provided."
+
+    def test_get_field_value(self, debug_log_record: LogRecord) -> None:
+        """
+        Test that `_get_field_value` correctly retrieves field values from the log record.
+
+        Args:
+        ----
+            debug_log_record (logging.LogRecord): The log record instance with known fields.
+
+        Asserts:
+        -------
+            - The `levelname` field value matches 'DEBUG'.
+            - The `message` field value matches 'Test message'.
+            - The `custom_field` value matches 'CustomValue'.
+        """
+        fmt_string = "%(levelname)s | %(asctime)s | %(message)s | %(custom_field)s"
+        formatter = BaseStructuredFormatter(fmt=fmt_string)
+
+        # Test known fields from log record
+        assert formatter._get_field_value(debug_log_record, "levelname") == "DEBUG"
+        assert formatter._get_field_value(debug_log_record, "message") == "Test message"
+
+        # Test custom field
+        debug_log_record.custom_field = "CustomValue"
+        assert formatter._get_field_value(debug_log_record, "custom_field") == "CustomValue"
+
+    def test_get_field_value_unknown_field(self, error_with_exc_log_record: LogRecord) -> None:
+        """
+        Test that `_get_field_value` returns None when an unknown field is requested.
+
+        Args:
+        ----
+            error_with_exc_log_record (logging.LogRecord): A log record that lacks the requested field.
+
+        Asserts:
+        -------
+            - The method returns None for an unknown field.
+        """
+        fmt_string = "%(unknown_field)s"
+        formatter = BaseStructuredFormatter(fmt=fmt_string)
+        formatter._add_exception(error_with_exc_log_record, {})
+
+        assert (
+            formatter._get_field_value(error_with_exc_log_record, "unknown_field") is None
+        ), "Should return None for unknown field."
diff --git a/django_logging/tests/formatters/test_flat_line_formatter.py b/django_logging/tests/formatters/test_flat_line_formatter.py
new file mode 100644
index 0000000..de62ad3
--- /dev/null
+++ b/django_logging/tests/formatters/test_flat_line_formatter.py
@@ -0,0 +1,69 @@
+import logging
+import sys
+
+import pytest
+
+from django_logging.formatters import FLATFormatter
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.formatters,
+    pytest.mark.flat_formatter,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestFLATFormatter:
+
+    def test_format_flat_record(
+        self, flat_formatter: FLATFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method formats a log record into a single flat line.
+
+        Args:
+        ----
+            flat_formatter (FLATFormatter): The formatter instance being tested.
+            debug_log_record (logging.LogRecord): The dummy log record to format.
+
+        Asserts:
+        -------
+            - The flat line contains key-value pairs for each specifier.
+            - No 'None' or empty fields appear in the formatted log.
+        """
+        debug_log_record.custom_field = "custom_value"
+        flat_formatter.specifiers = ["asctime", "levelname", "message", "custom_field"]
+
+        formatted_output = flat_formatter.format(debug_log_record)
+
+        # Check for presence of the fields as key-value pairs in the output
+        assert "asctime" in formatted_output, "asctime field not present."
+        assert (
+            "levelname='DEBUG'" in formatted_output
+        ), "levelname field incorrectly formatted."
+        assert (
+            "custom_field='custom_value'" in formatted_output
+        ), "custom_field incorrectly formatted."
+        assert "message" in formatted_output, "message field not present."
+
+    def test_format_with_exception(
+        self, flat_formatter: FLATFormatter, error_with_exc_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method correctly adds exception information to the flat line string.
+
+        Args:
+        ----
+            flat_formatter (FLATFormatter): The formatter instance being tested.
+            error_with_exc_log_record (logging.LogRecord): The dummy log record with an exception.
+
+        Asserts:
+        -------
+            - The exception information is included in the flat line.
+        """
+        flat_formatter.specifiers = ["asctime", "levelname", "message"]
+
+        formatted_output = flat_formatter.format(error_with_exc_log_record)
+
+        # Check that exception info is included in the output
+        assert "exception" in formatted_output, "Exception info not present in the formatted log."
diff --git a/django_logging/tests/formatters/test_json_formatter.py b/django_logging/tests/formatters/test_json_formatter.py
new file mode 100644
index 0000000..8ccc25d
--- /dev/null
+++ b/django_logging/tests/formatters/test_json_formatter.py
@@ -0,0 +1,154 @@
+import json
+import logging
+import sys
+
+import pytest
+
+from django_logging.formatters.json_formatter import JSONFormatter
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.formatters,
+    pytest.mark.json_formatter,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestJSONFormatter:
+
+    def test_format_creates_valid_json(
+        self, json_formatter: JSONFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method produces a valid JSON string.
+
+        This test checks whether the formatter's output is valid JSON
+        and can be parsed into a Python dictionary.
+
+        Parameters:
+        ----------
+        json_formatter : JSONFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture.
+
+        Asserts:
+        -------
+        - The formatted output can be parsed as valid JSON.
+        """
+        formatted_output = json_formatter.format(debug_log_record)
+        try:
+            parsed_output = json.loads(formatted_output)
+            assert isinstance(
+                parsed_output, dict
+            ), "Formatted output is not a valid JSON object."
+        except json.JSONDecodeError as e:
+            pytest.fail(f"Formatted output is not valid JSON: {e}")
+
+    def test_format_includes_message(
+        self, json_formatter: JSONFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method includes the 'message' field in the output.
+
+        This test ensures that the log record's message is present in the formatted JSON string.
+
+        Parameters:
+        ----------
+        json_formatter : JSONFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture.
+
+        Asserts:
+        -------
+        - The 'message' field in the formatted output matches the log record's message.
+        """
+        formatted_output = json_formatter.format(debug_log_record)
+        parsed_output = json.loads(formatted_output)
+        assert (
+            parsed_output["message"] == debug_log_record.getMessage()
+        ), "Message field is missing or incorrect."
+
+    def test_key_value_pairs_are_extracted(
+        self, json_formatter: JSONFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method extracts 'key=value' pairs from the log message.
+
+        This test checks that key-value pairs present in the log message are extracted and included in the JSON output.
+
+        Parameters:
+        ----------
+        json_formatter : JSONFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture, which includes key-value pairs.
+
+        Asserts:
+        -------
+        - The key-value pairs from the log message are correctly parsed and included in the JSON output.
+        """
+        debug_log_record.msg = "user_id=123 action=login is_active=True"
+        formatted_output = json_formatter.format(debug_log_record)
+        parsed_output = json.loads(formatted_output)
+
+        assert parsed_output["user_id"] == 123, "Key 'user_id' not correctly extracted."
+        assert (
+            parsed_output["action"] == "login"
+        ), "Key 'action' not correctly extracted."
+
+    def test_clean_message_removes_key_value_pairs(
+        self, json_formatter: JSONFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method removes key-value pairs from the message after extracting them.
+
+        This test ensures that after key-value pairs are extracted, the original log message is cleaned up by removing those pairs.
+
+        Parameters:
+        ----------
+        json_formatter : JSONFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture, which includes key-value pairs.
+
+        Asserts:
+        -------
+        - The final message in the output does not include key-value pairs.
+        """
+        debug_log_record.msg = "user_id=123 action=login"
+        formatted_output = json_formatter.format(debug_log_record)
+        parsed_output = json.loads(formatted_output)
+
+        # The message field should not include the key-value pairs
+        assert parsed_output["message"] == "", "Message still contains key-value pairs."
+
+    def test_format_handles_complex_types(
+        self, json_formatter: JSONFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method correctly handles complex types such as dict, list, and tuple in the message.
+
+        This test verifies that key-value pairs with complex types like dictionaries, lists, and tuples
+        are parsed correctly and included in the JSON output.
+
+        Parameters:
+        ----------
+        json_formatter : JSONFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture, which includes complex types.
+
+        Asserts:
+        -------
+        - Complex types are correctly parsed and converted in the JSON output.
+        """
+        debug_log_record.msg = "data={'key': 'value'} items=[1,2,3] coords=(1,2)"
+        formatted_output = json_formatter.format(debug_log_record)
+        parsed_output = json.loads(formatted_output)
+
+        assert parsed_output["data"] == {
+            "key": "value"
+        }, "Dictionary type not correctly parsed."
+        assert parsed_output["items"] == [1, 2, 3], "List type not correctly parsed."
diff --git a/django_logging/tests/formatters/test_xml_formatter.py b/django_logging/tests/formatters/test_xml_formatter.py
new file mode 100644
index 0000000..f1ab1bf
--- /dev/null
+++ b/django_logging/tests/formatters/test_xml_formatter.py
@@ -0,0 +1,197 @@
+import logging
+import sys
+import xml.etree.ElementTree as ET
+
+import pytest
+
+from django_logging.formatters.xml_formatter import XMLFormatter
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.formatters,
+    pytest.mark.xml_formatter,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestXMLFormatter:
+
+    def test_format_creates_valid_xml(
+        self, xml_formatter: XMLFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method produces a valid XML string.
+
+        This test checks if the output from the formatter is valid XML
+        and can be parsed without errors.
+
+        Parameters:
+        ----------
+        xml_formatter : XMLFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture.
+
+        Asserts:
+        -------
+        - The formatted output can be parsed as valid XML.
+        """
+        formatted_output = xml_formatter.format(debug_log_record)
+        try:
+            ET.fromstring(formatted_output)
+        except ET.ParseError as e:
+            pytest.fail(f"Formatted output is not valid XML: {e}")
+
+    def test_format_includes_message_field(
+        self, xml_formatter: XMLFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method includes the 'message' field in the output.
+
+        This test ensures that the log record's message field is present in the
+        formatted XML string.
+
+        Parameters:
+        ----------
+        xml_formatter : XMLFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture.
+
+        Asserts:
+        -------
+        - The formatted XML contains the 'message' element.
+        """
+        formatted_output = xml_formatter.format(debug_log_record)
+        root = ET.fromstring(formatted_output)
+        message_element = root.find("message")
+        assert message_element is not None, "Message field not found in XML."
+        assert message_element.text == debug_log_record.getMessage(), (
+            f"Expected message to be '{debug_log_record.getMessage()}', but got '{message_element.text}'"
+        )
+
+    def test_format_pretty_prints_xml(
+        self, xml_formatter: XMLFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method returns a pretty-printed XML string.
+
+        This test ensures that the XML output is indented and well-formatted.
+
+        Parameters:
+        ----------
+        xml_formatter : XMLFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture.
+
+        Asserts:
+        -------
+        - The XML string is properly indented and formatted.
+        """
+        formatted_output = xml_formatter.format(debug_log_record)
+        lines = formatted_output.split("\n")
+
+        # Ensure there are multiple lines due to pretty-printing
+        assert len(lines) > 1, "XML output is not pretty-printed."
+
+        # Check indentation in the XML (default indent is 2 spaces)
+        for line in lines[1:3]:
+            if line.strip():
+                assert line.startswith("  "), "XML elements are not properly indented."
+
+    def test_format_includes_exception_if_present(
+        self, xml_formatter: XMLFormatter, error_with_exc_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method includes the 'exception' field in the XML output
+        if exception information is present in the log record.
+
+        Parameters:
+        ----------
+        xml_formatter : XMLFormatter
+            The formatter instance being tested.
+        error_with_exc_log_record : logging.LogRecord
+            A log record with exception info set in exc_info.
+
+        Asserts:
+        -------
+        - The XML output contains an 'exception' element when an exception is logged.
+        """
+        formatted_output = xml_formatter.format(error_with_exc_log_record)
+        root = ET.fromstring(formatted_output)
+        exception_element = root.find("exception")
+        assert exception_element is not None, "Exception field not found in XML."
+        assert exception_element.text, "Exception field is empty."
+
+    def test_format_handles_list_in_field(
+        self, xml_formatter: XMLFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method correctly handles a log field containing a list.
+
+        Parameters:
+        ----------
+        xml_formatter : XMLFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture, modified to include a list in a custom field.
+
+        Asserts:
+        -------
+        - The XML output includes properly formatted list elements in the specified field.
+        """
+        # Modify the log record to include a list in a custom field
+        debug_log_record.args = None  # Make sure it's not interfering
+        debug_log_record.custom_list = [1, 2, 3, 4]
+        xml_formatter.specifiers = ["custom_list"]
+
+        formatted_output = xml_formatter.format(debug_log_record)
+        root = ET.fromstring(formatted_output)
+
+        # Check if the 'custom_list' field is correctly formatted as XML
+        custom_list_element = root.find("custom_list")
+        assert custom_list_element is not None, "custom_list field not found in XML."
+        assert len(custom_list_element) == 4, "List items not correctly formatted in XML."
+        for i, item in enumerate(custom_list_element):
+            assert item.text == str(i + 1), f"List item {i} does not match expected value."
+
+    def test_format_handles_dict_in_field(
+        self, xml_formatter: XMLFormatter, debug_log_record: logging.LogRecord
+    ) -> None:
+        """
+        Test that the `format` method correctly handles a log field containing a dictionary.
+
+        Parameters:
+        ----------
+        xml_formatter : XMLFormatter
+            The formatter instance being tested.
+        debug_log_record : logging.LogRecord
+            The dummy log record created by the fixture, modified to include a dict in a custom field.
+
+        Asserts:
+        -------
+        - The XML output includes properly formatted dictionary elements in the specified field.
+        """
+        # Modify the log record to include a dict in a custom field
+        debug_log_record.args = None  # Make sure it's not interfering
+        debug_log_record.custom_dict = {"id": 123, "name": "John"}
+        xml_formatter.specifiers = ["custom_dict"]
+
+        formatted_output = xml_formatter.format(debug_log_record)
+        root = ET.fromstring(formatted_output)
+
+        # Check if the 'custom_dict' field is correctly formatted as XML
+        custom_dict_element = root.find("custom_dict")
+        assert custom_dict_element is not None, "custom_dict field not found in XML."
+
+        # Verify that dictionary keys are formatted as child elements
+        user_id_element = custom_dict_element.find("id")
+        user_name_element = custom_dict_element.find("name")
+
+        assert user_id_element is not None, "ID element not found in XML."
+        assert user_name_element is not None, "Name element not found in XML."
+
+        # Ensure that the values match
+        assert user_id_element.text == "123", f"Expected ID to be '123', got {user_id_element.text}"
+        assert user_name_element.text == "John", f"Expected name to be 'John', got {user_name_element.text}"
diff --git a/django_logging/tests/middleware/test_base_middleware.py b/django_logging/tests/middleware/test_base_middleware.py
new file mode 100644
index 0000000..816b9d2
--- /dev/null
+++ b/django_logging/tests/middleware/test_base_middleware.py
@@ -0,0 +1,71 @@
+import sys
+from typing import Callable
+from unittest.mock import Mock
+
+import pytest
+from asgiref.sync import iscoroutinefunction
+from django.http import HttpRequest, HttpResponseBase
+
+from django_logging.middleware.base import BaseMiddleware
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.middleware,
+    pytest.mark.base_middleware,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestBaseMiddleware:
+    """
+    Test suite for the BaseMiddleware class.
+    """
+
+    def test_sync_mode(self) -> None:
+        """
+        Test that the middleware correctly identifies and handles synchronous requests.
+        This test verifies that when the `get_response` function is synchronous,
+        the middleware calls the `__sync_call__` method.
+        """
+        # Mock synchronous get_response
+        mock_get_response = Mock(spec=Callable[[HttpRequest], HttpResponseBase])
+
+        # Create an instance of the middleware
+        middleware = BaseMiddleware(mock_get_response)
+
+        # Ensure that it is in synchronous mode
+        assert not iscoroutinefunction(middleware.get_response)
+        assert not middleware.async_mode
+
+        # Test that calling the middleware raises NotImplementedError (since __sync_call__ is not implemented)
+        with pytest.raises(
+            NotImplementedError, match="__sync_call__ must be implemented by subclass"
+        ):
+            request = HttpRequest()
+            middleware(request)
+
+    @pytest.mark.asyncio
+    async def test_async_mode(self) -> None:
+        """
+        Test that the middleware correctly identifies and handles asynchronous requests.
+        This test verifies that when the `get_response` function is asynchronous,
+        the middleware calls the `__acall__` method.
+        """
+
+        # Mock asynchronous get_response
+        async def mock_get_response(request: HttpRequest) -> HttpResponseBase:
+            return Mock(spec=HttpResponseBase)
+
+        # Create an instance of the middleware
+        middleware = BaseMiddleware(mock_get_response)
+
+        # Ensure that it is in asynchronous mode
+        assert iscoroutinefunction(middleware.get_response)
+        assert middleware.async_mode
+
+        # Test that calling the middleware raises NotImplementedError (since __acall__ is not implemented)
+        with pytest.raises(
+            NotImplementedError, match="__acall__ must be implemented by subclass"
+        ):
+            request = HttpRequest()
+            await middleware(request)
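Note: both tests rely on the documented Django pattern for dual sync/async middleware: inspect `get_response` with `iscoroutinefunction` and mark the instance as a coroutine function when needed. A sketch consistent with the assertions above (subclass hooks raise until overridden); the real `BaseMiddleware` may differ in detail:

    from asgiref.sync import iscoroutinefunction, markcoroutinefunction

    class BaseMiddleware:
        """Dispatch to a sync or async handler based on get_response."""

        def __init__(self, get_response):
            self.get_response = get_response
            self.async_mode = iscoroutinefunction(get_response)
            if self.async_mode:
                # Tell Django this middleware instance is awaitable.
                markcoroutinefunction(self)

        def __call__(self, request):
            if self.async_mode:
                return self.__acall__(request)  # returns a coroutine
            return self.__sync_call__(request)

        def __sync_call__(self, request):
            raise NotImplementedError("__sync_call__ must be implemented by subclass")

        async def __acall__(self, request):
            raise NotImplementedError("__acall__ must be implemented by subclass")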
diff --git a/django_logging/tests/middleware/test_monitor_log_size.py b/django_logging/tests/middleware/test_monitor_log_size.py
new file mode 100644
index 0000000..c16e7e0
--- /dev/null
+++ b/django_logging/tests/middleware/test_monitor_log_size.py
@@ -0,0 +1,141 @@
+import sys
+from datetime import timedelta
+from unittest.mock import Mock, patch
+
+import pytest
+from django.core.cache import cache
+from django.http import HttpRequest, HttpResponse
+from django.utils.timezone import now
+
+from django_logging.middleware.monitor_log_size import MonitorLogSizeMiddleware
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+
+pytestmark = [
+    pytest.mark.middleware,
+    pytest.mark.monitor_log_size_middleware,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestMonitorLogSizeMiddleware:
+    """
+    Test suite for the MonitorLogSizeMiddleware class.
+    """
+
+    @pytest.fixture(autouse=True)
+    def setup(self) -> None:
+        """
+        Clears cache before each test.
+        """
+        cache.clear()
+
+    def test_should_run_task_no_cache(self) -> None:
+        """
+        Test that the task should run when there is no cache entry for 'last_run_logs_size_audit'.
+        """
+        assert MonitorLogSizeMiddleware.should_run_task() is True
+
+    def test_should_run_task_with_recent_cache(self) -> None:
+        """
+        Test that the task should not run if the cache indicates the last run was within a week.
+        """
+        last_run_time = now() - timedelta(days=2)
+        cache.set("last_run_logs_size_audit", last_run_time)
+
+        assert MonitorLogSizeMiddleware.should_run_task() is False
+
+    def test_should_run_task_with_old_cache(self) -> None:
+        """
+        Test that the task should run if the cache indicates the last run was more than a week ago.
+        """
+        last_run_time = now() - timedelta(weeks=2)
+        cache.set("last_run_logs_size_audit", last_run_time)
+
+        assert MonitorLogSizeMiddleware.should_run_task() is True
+
+    @patch("django_logging.middleware.monitor_log_size.call_command")
+    def test_sync_run_log_size_check(self, mock_call_command: Mock) -> None:
+        """
+        Test the synchronous execution of the log size check.
+        """
+        mock_get_response = Mock(return_value=HttpResponse())
+        middleware = MonitorLogSizeMiddleware(mock_get_response)
+
+        request = HttpRequest()
+
+        # Simulate no recent audit, so the task should run
+        cache.set("last_run_logs_size_audit", now() - timedelta(weeks=2))
+
+        response = middleware.__sync_call__(request)
+
+        mock_call_command.assert_called_once_with("logs_size_audit")
+        assert cache.get("last_run_logs_size_audit") is not None
+        assert response.status_code == 200
+
+    @pytest.mark.asyncio
+    @patch("django_logging.middleware.monitor_log_size.call_command")
+    async def test_async_run_log_size_check(self, mock_call_command: Mock) -> None:
+        """
+        Test the asynchronous execution of the log size check.
+        """
+
+        async def mock_get_response(request: HttpRequest) -> HttpResponse:
+            return HttpResponse()
+
+        middleware = MonitorLogSizeMiddleware(mock_get_response)
+
+        request = HttpRequest()
+
+        # Simulate no recent audit, so the task should run
+        cache.set("last_run_logs_size_audit", now() - timedelta(weeks=2))
+
+        response = await middleware.__acall__(request)
+
+        mock_call_command.assert_called_once_with("logs_size_audit")
+        assert cache.get("last_run_logs_size_audit") is not None
+        assert response.status_code == 200
+
+    @patch(
+        "django_logging.middleware.monitor_log_size.call_command", side_effect=Exception("Command failed")
+    )
+    def test_sync_run_log_size_check_failure(self, mock_call_command: Mock) -> None:
+        """
+        Test error handling in the synchronous log size check.
+        """
+        mock_get_response = Mock(return_value=HttpResponse())
+        middleware = MonitorLogSizeMiddleware(mock_get_response)
+
+        request = HttpRequest()
+
+        with patch(
+            "django_logging.middleware.monitor_log_size.logger.error"
+        ) as mock_logger:
+            middleware.__sync_call__(request)
+
+            mock_call_command.assert_called_once_with("logs_size_audit")
+            mock_logger.assert_called_once()
+
+    @pytest.mark.asyncio
+    @patch(
+        "django_logging.middleware.monitor_log_size.call_command", side_effect=Exception("Command failed")
+    )
+    async def test_async_run_log_size_check_failure(self, mock_call_command: Mock) -> None:
+        """
+        Test error handling in the asynchronous log size check.
+        """
+
+        async def mock_get_response(request: HttpRequest) -> HttpResponse:
+            return HttpResponse()
+
+        middleware = MonitorLogSizeMiddleware(mock_get_response)
+
+        request = HttpRequest()
+
+        with patch(
+            "django_logging.middleware.monitor_log_size.logger.error"
+        ) as mock_logger:
+            await middleware.__acall__(request)
+
+            mock_call_command.assert_called_once_with("logs_size_audit")
+            mock_logger.assert_called_once()
+
diff --git a/django_logging/tests/middleware/test_request_middleware.py b/django_logging/tests/middleware/test_request_middleware.py
index 1571881..61089c9 100644
--- a/django_logging/tests/middleware/test_request_middleware.py
+++ b/django_logging/tests/middleware/test_request_middleware.py
@@ -1,10 +1,14 @@
-import logging
+import asyncio
+import io
 import sys
-from unittest.mock import Mock
+from typing import AsyncGenerator, Generator
+from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
-from django.contrib.auth import get_user_model
-from django.contrib.auth.models import AnonymousUser
+from django.contrib.auth.models import AnonymousUser, User
+from django.core.handlers.asgi import ASGIRequest
+from django.db import connection
+from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
 from django.test import RequestFactory
 
 from django_logging.middleware import RequestLogMiddleware
@@ -19,135 +23,343 @@
 
 class TestRequestMiddleware:
 
-    def test_authenticated_user_logging(
-        self,
-        request_middleware: RequestLogMiddleware,
-        request_factory: RequestFactory,
-        caplog: pytest.LogCaptureFixture,
+    @pytest.mark.django_db
+    def test_sync_sql_logging(
+        self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware
     ) -> None:
         """
-        Test logging of requests for authenticated users.
-
-        This test verifies that when an authenticated user makes a request,
-        the relevant request information, including the username, is logged.
-
-        Args:
-        ----
-        request_middleware : RequestLogMiddleware
-            The middleware instance used to process the request.
-        request_factory : RequestFactory
-            A factory for creating mock HTTP requests.
-        caplog : pytest.LogCaptureFixture
-            A fixture for capturing log messages.
+        Test that SQL query logging works for synchronous requests.
 
         Asserts:
         -------
-        - "Request Info" is present in the logs.
-        - The requested path is logged.
-        - The username of the authenticated user is logged.
-        - The request object has `ip_address` and `browser_type` attributes.
-        """
-        request = request_factory.get("/test-path")
-
-        UserModel = get_user_model()
-        username_field = UserModel.USERNAME_FIELD
-
-        request.user = Mock()
-        request.user.is_authenticated = True
-        setattr(request.user, username_field, "test_user")
-
-        with caplog.at_level(logging.INFO):
-            request_middleware(request)
-
-        assert "Request Info" in caplog.text
-        assert "test-path" in caplog.text
-        assert "test_user" in caplog.text
-        assert request.ip_address
-        assert request.browser_type
-
-    def test_anonymous_user_logging(
-        self,
-        request_middleware: RequestLogMiddleware,
-        request_factory: RequestFactory,
-        caplog: pytest.LogCaptureFixture,
-    ) -> None:
+            - SQL queries are logged when `self.log_sql` is True.
         """
-        Test logging of requests for anonymous users.
+        request = request_factory.get("/")
+        request.user = AnonymousUser()
+
+        # Simulate an SQL query
+        with connection.cursor() as cursor:
+            cursor.execute("SELECT 1")
+
+        response = request_middleware(request)
 
-        This test ensures that when an anonymous user makes a request,
-        the relevant request information, including the identification as "Anonymous", is logged.
+        assert response.status_code == 200
+        # A fuller test would capture the logger output and assert on the SQL text.
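+        # A sketch of such a capture, assuming the middleware logs through
+        # django_logging.middleware.request_middleware.logger (the target
+        # patched in the tests below):
+        #
+        #   with patch("django_logging.middleware.request_middleware.logger") as log:
+        #       request_middleware(request)
+        #       assert log.info.called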
 
-        Args:
-        ----
-        request_middleware : RequestLogMiddleware
-            The middleware instance used to process the request.
-        request_factory : RequestFactory
-            A factory for creating mock HTTP requests.
-        caplog : pytest.LogCaptureFixture
-            A fixture for capturing log messages.
+    @pytest.mark.asyncio
+    async def test_async_request(self, request_factory: RequestFactory) -> None:
+        """
+        Test handling of an asynchronous request with RequestLogMiddleware.
 
         Asserts:
         -------
-        - "Request Info" is present in the logs.
-        - The request is identified as coming from an "Anonymous" user.
+            - The middleware processes the asynchronous request successfully and returns a response.
         """
-        request = request_factory.get("/test-path")
+        request = request_factory.get("/")
         request.user = AnonymousUser()
 
-        with caplog.at_level(logging.INFO):
-            request_middleware(request)
+        async def async_get_response(request: HttpRequest) -> HttpResponse:
+            return HttpResponse("OK")
+
+        middleware = RequestLogMiddleware(async_get_response)
+        middleware.log_sql = True
+
+        # Build an ASGIRequest with a minimal ASGI scope to exercise the async path
+        scope = {
+            "type": "http",
+            "method": "GET",
+            "path": "/",
+            "headers": [],
+        }
+
+        body_file = io.BytesIO(b"")
+        asgi_request = ASGIRequest(scope, body_file)
+        response = await middleware(asgi_request)
+        assert response.status_code == 200
+        assert "OK" in response.content.decode()
+
+        # Test exception block
+        async def async_get_response_with_error(request: HttpRequest) -> HttpResponse:
+            raise asyncio.CancelledError()
+
+        middleware = RequestLogMiddleware(async_get_response_with_error)
 
-        assert "Request Info" in caplog.text
-        assert "Anonymous" in caplog.text
+        with pytest.raises(asyncio.CancelledError):
+            await middleware(asgi_request)
 
-    def test_ip_address_extraction(
-        self, request_middleware: RequestLogMiddleware, request_factory: RequestFactory
+    @pytest.mark.django_db
+    def test_request_id_header(
+        self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware
     ) -> None:
         """
-        Test extraction of the client's IP address from the request.
+        Test that RequestLogMiddleware retrieves the request ID from the headers.
 
-        This test verifies that the middleware correctly extracts the IP address
-        from the `HTTP_X_FORWARDED_FOR` header in the request.
+        Asserts:
+        -------
+            - The request ID is correctly retrieved from the request headers.
+        """
+        request = request_factory.get("/")
+        request.headers = {"x-request-id": "12345"}
+        request.user = AnonymousUser()
+
+        response = request_middleware(request)
+
+        assert response.status_code == 200
+        assert request_middleware.context["request_id"] == "12345"
 
-        Args:
-        ----
-        request_middleware : RequestLogMiddleware
-            The middleware instance used to process the request.
-        request_factory : RequestFactory
-            A factory for creating mock HTTP requests.
+    @pytest.mark.asyncio
+    async def test_async_streaming_response(
+        self, request_factory: RequestFactory
+    ) -> None:
+        """
+        Test handling of asynchronous streaming responses with RequestLogMiddleware.
 
         Asserts:
         -------
-        - The `ip_address` attribute of the request is correctly set to the value in the `HTTP_X_FORWARDED_FOR` header.
+            - The middleware handles asynchronous streaming responses correctly.
         """
-        request = request_factory.get("/test-path", HTTP_X_FORWARDED_FOR="192.168.1.1")
+        request = request_factory.get("/")
+        request.user = AnonymousUser()
 
-        request_middleware(request)
+        async def streaming_response(request: HttpRequest) -> StreamingHttpResponse:
+            async def generator() -> AsyncGenerator:
+                for chunk in [b"chunk1", b"chunk2"]:
+                    yield chunk
+
+            return StreamingHttpResponse(generator())
+
+        middleware = RequestLogMiddleware(streaming_response)
 
-        assert request.ip_address == "192.168.1.1"
+        response = await middleware(request)
 
-    def test_user_agent_extraction(
-        self, request_middleware: RequestLogMiddleware, request_factory: RequestFactory
+        assert response.status_code == 200
+        assert response.streaming
+        # Assert the streaming content
+        streaming_content = [chunk async for chunk in response.streaming_content]
+        assert streaming_content == [b"chunk1", b"chunk2"]
+
+    def test_sync_streaming_wrapper(
+        self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware
     ) -> None:
         """
-        Test extraction of the client's user agent from the request.
+        Test that the middleware routes a StreamingHttpResponse through the sync streaming wrapper.
+        """
 
-        This test verifies that the middleware correctly extracts the user agent
-        from the `HTTP_USER_AGENT` header in the request.
+        def streaming_view(request: HttpRequest) -> StreamingHttpResponse:
+            def generator() -> Generator:
+                yield b"chunk1"
+                yield b"chunk2"
 
-        Args:
-        ----
-        request_middleware : RequestLogMiddleware
-            The middleware instance used to process the request.
-        request_factory : RequestFactory
-            A factory for creating mock HTTP requests.
+            return StreamingHttpResponse(generator())
 
-        Asserts:
-        -------
-        - The `browser_type` attribute of the request is correctly set to the value in the `HTTP_USER_AGENT` header.
+        request = request_factory.get("/")
+        request.user = AnonymousUser()
+
+        middleware = RequestLogMiddleware(streaming_view)
+
+        with patch("django_logging.middleware.request_middleware.logger"):
+            response = middleware(request)
+            assert response.status_code == 200
+            assert response.streaming
+
+    def test_sync_streaming_wrapper_raises_exception(self, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that sync_streaming_wrapper handles an exception during streaming.
+
+        Steps:
+            - Mock the streaming content to raise an exception.
+            - Assert that the exception is logged and re-raised.
+        """
+
+        request_id = "test-request-id"
+
+        # Mock the streaming content to raise an exception when iterated
+        streaming_content = MagicMock()
+        streaming_content.__iter__.side_effect = Exception("Test Exception")
+
+        # Patch the logger to check for log messages
+        with patch(
+            "django_logging.middleware.request_middleware.logger"
+        ) as mock_logger:
+            with pytest.raises(Exception, match="Test Exception"):
+                list(
+                    request_middleware._sync_streaming_wrapper(
+                        streaming_content, request_id
+                    )
+                )
+
+            # Check that logger.exception was called with the correct message
+            mock_logger.exception.assert_called_once_with(
+                "Streaming failed: request_id=%s", request_id
+            )
+
+    @pytest.mark.asyncio
+    async def test_async_streaming_wrapper_cancelled_error(self, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that async_streaming_wrapper handles asyncio.CancelledError properly.
+
+        Steps:
+            - Mock the streaming content to raise asyncio.CancelledError.
+            - Assert that the cancellation is logged and re-raised.
         """
-        request = request_factory.get("/test-path", HTTP_USER_AGENT="Mozilla/5.0")
 
+        request_id = "test-request-id"
+
+        # Mock the streaming content to raise asyncio.CancelledError
+        streaming_content = AsyncMock()
+        streaming_content.__aiter__.side_effect = asyncio.CancelledError
+
+        # Patch the logger to check for log messages
+        with patch(
+            "django_logging.middleware.request_middleware.logger"
+        ) as mock_logger:
+            with pytest.raises(asyncio.CancelledError):
+                async for _ in request_middleware._async_streaming_wrapper(
+                    streaming_content, request_id
+                ):
+                    pass
+
+            # Check that logger.warning was called with the correct message
+            mock_logger.warning.assert_called_once_with(
+                "Streaming was cancelled: request_id=%s", request_id
+            )
+
+    @pytest.mark.asyncio
+    async def test_async_streaming_wrapper_generic_exception(self, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that async_streaming_wrapper handles a generic Exception properly.
+
+        Steps:
+            - Mock the streaming content to raise a generic Exception.
+            - Assert that the exception is logged and re-raised.
+        """
+
+        request_id = "test-request-id"
+
+        # Mock the streaming content to raise a generic Exception
+        streaming_content = AsyncMock()
+        streaming_content.__aiter__.side_effect = Exception("Test Exception")
+
+        # Patch the logger to check for log messages
+        with patch(
+            "django_logging.middleware.request_middleware.logger"
+        ) as mock_logger:
+            with pytest.raises(Exception, match="Test Exception"):
+                async for _ in request_middleware._async_streaming_wrapper(
+                    streaming_content, request_id
+                ):
+                    pass
+
+            # Check that logger.exception was called with the correct message
+            mock_logger.exception.assert_called_once_with(
+                "Streaming failed: request_id=%s", request_id
+            )
+
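+    # The streaming-wrapper tests pin down generators roughly shaped like this
+    # (a sketch; the real _sync/_async_streaming_wrapper may differ):
+    #
+    #   def _sync_streaming_wrapper(self, streaming_content, request_id):
+    #       try:
+    #           yield from streaming_content
+    #       except Exception:
+    #           logger.exception("Streaming failed: request_id=%s", request_id)
+    #           raise
+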
+    def test_sync_streaming_response_wrapper(self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that the synchronous streaming wrapper works correctly.
+        """
+
+        def streaming_view(request: HttpRequest) -> StreamingHttpResponse:
+            return StreamingHttpResponse(iter([b"chunk1", b"chunk2"]))
+
+        request = request_factory.get("/")
+        request.user = AnonymousUser()
+
+        # Wrap the streaming content in the middleware
+        middleware = RequestLogMiddleware(streaming_view)
+        response = middleware(request)
+
+        assert response.status_code == 200
+        assert response.streaming
+        # Assert the streaming content
+        streaming_content = list(response.streaming_content)
+        assert streaming_content == [b"chunk1", b"chunk2"]
+
+    @pytest.mark.django_db
+    def test_get_user_authenticated(self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that the middleware retrieves the correct username for authenticated users.
+        """
+        user = User.objects.create(username="testuser")
+
+        request = request_factory.get("/")
+        request.user = user
+
+        response = request_middleware(request)
+
+        assert response.status_code == 200
+        assert request_middleware.get_user(request) == f"[testuser (ID:{user.pk})]"
+
+    def test_get_user_anonymous(self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that the middleware retrieves 'Anonymous' for unauthenticated users.
+        """
+        request = request_factory.get("/")
+        request.user = AnonymousUser()
+
+        response = request_middleware(request)
+
+        assert response.status_code == 200
+        assert request_middleware.get_user(request) == "Anonymous"
+
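+    # Taken together, the two get_user tests imply formatting roughly like
+    # this (a sketch; the real get_user may differ):
+    #
+    #   if request.user.is_authenticated:
+    #       return f"[{request.user.get_username()} (ID:{request.user.pk})]"
+    #   return "Anonymous"
+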
+    def test_get_ip_address(self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that the middleware correctly retrieves the client's IP address.
+        """
+        request = request_factory.get("/")
+        request.META["REMOTE_ADDR"] = "192.168.1.1"
+        request.user = AnonymousUser()
+
+        response = request_middleware(request)
+
+        assert response.status_code == 200
+        assert request_middleware.get_ip_address(request) == "192.168.1.1"
+
+        request.META["REMOTE_ADDR"] = None
+
+        request.META["HTTP_X_FORWARDED_FOR"] = "192.168.1.1,"
+        assert request_middleware.get_ip_address(request) == "192.168.1.1"
+
+        request.ip_address = "192.168.1.1"
+        assert request_middleware.get_ip_address(request) == "192.168.1.1"
+
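+    # The three assertions above exercise a fallback chain over a cached
+    # request.ip_address, the first (comma-stripped) HTTP_X_FORWARDED_FOR
+    # entry, and REMOTE_ADDR; the exact precedence inside get_ip_address is
+    # an assumption here.
+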
+    def test_get_request_id(self, request_factory: RequestFactory, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that the middleware correctly retrieves the request ID from headers.
+        """
+        request = request_factory.get("/")
+        request.headers = {"x-request-id": "12345"}
+        request.user = AnonymousUser()
+
+        response = request_middleware(request)
+
+        assert response.status_code == 200
+        assert request_middleware.get_request_id(request) == "12345"
+
+        request.headers = {}
+        request.META["HTTP_X_REQUEST_ID"] = "12345"
         request_middleware(request)
+        assert request_middleware.get_request_id(request) == "12345"
+
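+    # A sketch of the header-then-META fallback these assertions assume
+    # (the real get_request_id may differ):
+    #
+    #   return request.headers.get("x-request-id") or request.META.get(
+    #       "HTTP_X_REQUEST_ID"
+    #   )
+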
+    def test_log_sql_queries_with_queries(self, request_middleware: RequestLogMiddleware) -> None:
+        """
+        Test that _log_sql_queries correctly logs and formats SQL queries.
+        """
+
+        # Simulated SQL queries (new_queries in the method)
+        mock_queries = [
+            {"time": "0.002", "sql": "SELECT * FROM my_table WHERE id = 1"},
+            {"time": "0.004", "sql": "UPDATE my_table SET value = 'test' WHERE id = 1"},
+        ]
+
+        log_output = request_middleware._log_sql_queries(0, mock_queries)
 
-        assert request.browser_type == "Mozilla/5.0"
+        # Assert that a formatted log summary was produced
+        assert log_output
diff --git a/django_logging/tests/settings/test_checks.py b/django_logging/tests/settings/test_checks.py
index 326a58d..ee59bd6 100644
--- a/django_logging/tests/settings/test_checks.py
+++ b/django_logging/tests/settings/test_checks.py
@@ -1,5 +1,5 @@
 import sys
-from typing import Generator, List
+from typing import List
 from unittest.mock import patch
 
 import pytest
diff --git a/django_logging/tests/setup.py b/django_logging/tests/setup.py
index 0f22ec1..d0e8d54 100644
--- a/django_logging/tests/setup.py
+++ b/django_logging/tests/setup.py
@@ -1,8 +1,8 @@
-from django.conf import settings
 import django
+from django.conf import settings
 
 
-def configure_django_settings():
+def configure_django_settings() -> None:
     if not settings.configured:
         settings.configure(
             DEBUG=True,
@@ -18,6 +18,21 @@ def configure_django_settings():
                 "django_logging",
             ],
             MIDDLEWARE=[],
+            TEMPLATES=[
+                {
+                    "BACKEND": "django.template.backends.django.DjangoTemplates",
+                    "DIRS": [],
+                    "APP_DIRS": True,
+                    "OPTIONS": {
+                        "context_processors": [
+                            "django.template.context_processors.debug",
+                            "django.template.context_processors.request",
+                            "django.contrib.auth.context_processors.auth",
+                            "django.contrib.messages.context_processors.messages",
+                        ],
+                    },
+                },
+            ],
             DJANGO_LOGGING={
                 "AUTO_INITIALIZATION_ENABLE": True,
                 "INITIALIZATION_MESSAGE_ENABLE": True,
@@ -27,6 +42,12 @@ def configure_django_settings():
                     "DEBUG": 1,
                     "INFO": 1,
                 },
+                "LOG_FILE_FORMAT_TYPES": {
+                    "DEBUG": "JSON",
+                },
+                "EXTRA_LOG_FILES": {
+                    "DEBUG": True,
+                },
                 "LOG_CONSOLE_LEVEL": "DEBUG",
                 "LOG_CONSOLE_FORMAT": 1,
                 "LOG_CONSOLE_COLORIZE": True,
diff --git a/django_logging/tests/utils/test_context_manager.py b/django_logging/tests/utils/test_context_manager.py
index 578cf9c..53f1c92 100644
--- a/django_logging/tests/utils/test_context_manager.py
+++ b/django_logging/tests/utils/test_context_manager.py
@@ -4,8 +4,8 @@
 
 import pytest
 
-from django_logging.utils.context_manager import _restore_logging_config, config_setup
 from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+from django_logging.utils.context_manager import _restore_logging_config, config_setup
 
 pytestmark = [
     pytest.mark.utils,
@@ -74,6 +74,8 @@ def test_config_setup_applies_custom_config(
                     "log_levels": ["INFO"],
                     "log_dir": "/tmp/logs",
                     "log_file_formats": {"INFO": 1},
+                    "log_file_format_types": {"INFO": "normal"},
+                    "extra_log_files": {"INFO": False},
                     "console_level": "DEBUG",
                     "console_format": 2,
                     "colorize_console": False,
@@ -127,6 +129,8 @@ def test_config_context_restores_original_config(
                     "log_levels": ["INFO"],
                     "log_dir": "/tmp/logs",
                     "log_file_formats": {"INFO": 1},
+                    "log_file_format_types": {"INFO": "normal"},
+                    "extra_log_files": {"INFO": False},
                     "console_level": "DEBUG",
                     "console_format": 2,
                     "colorize_console": False,
diff --git a/django_logging/tests/utils/test_email_notifier.py b/django_logging/tests/utils/test_email_notifier.py
index b68020e..d83ac7a 100644
--- a/django_logging/tests/utils/test_email_notifier.py
+++ b/django_logging/tests/utils/test_email_notifier.py
@@ -6,8 +6,8 @@
 
 import pytest
 
-from django_logging.utils.log_email_notifier.notifier import send_email_async
 from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+from django_logging.utils.log_email_notifier.notifier import send_email_async
 
 pytestmark = [
     pytest.mark.utils,
@@ -84,7 +84,7 @@ def test_send_email_async_success(
         mock_smtp_instance.quit.assert_called_once()
 
         mock_info.assert_called_once_with(
-            "Log Record has been sent to ADMIN EMAIL successfully."
+            "The Record has been sent to ADMIN EMAIL successfully."
         )
         mock_warning.assert_not_called()
 
@@ -122,6 +122,6 @@ def test_send_email_async_failure(
         email_sent_event.wait()
 
         mock_warning.assert_called_once_with(
-            "Email Notifier failed to send Log Record: %s", ANY
+            "Email Notifier failed to send the Record: %s", ANY
         )
         mock_info.assert_not_called()
diff --git a/django_logging/tests/utils/test_get_conf.py b/django_logging/tests/utils/test_get_conf.py
index 6b6df79..38bd2ef 100644
--- a/django_logging/tests/utils/test_get_conf.py
+++ b/django_logging/tests/utils/test_get_conf.py
@@ -5,13 +5,13 @@
 import pytest
 from django.conf import settings
 
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
 from django_logging.utils.get_conf import (
     get_config,
     is_auto_initialization_enabled,
     is_initialization_message_enabled,
     use_email_notifier_template,
 )
-from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
 
 pytestmark = [
     pytest.mark.utils,
@@ -44,6 +44,14 @@ def test_get_conf(self, mock_settings: Dict) -> None:
             "log_file_formats": {
                 "DEBUG": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
             },
+            "log_file_format_types": {
+                "DEBUG": "JSON",
+                "INFO": "XML",
+            },
+            "extra_log_files": {
+                "DEBUG": False,
+                "INFO": True,
+            },
             "console_level": "WARNING",
             "console_format": "%(levelname)s - %(message)s",
             "colorize_console": True,
@@ -52,32 +60,31 @@ def test_get_conf(self, mock_settings: Dict) -> None:
             "log_email_notifier_log_levels": ["ERROR", None],
             "log_email_notifier_log_format": "custom_format",
         }
-        print(expected)
         result = get_config()
         assert result == expected
 
         result = get_config(extra_info=True)
-
-        expected_extra = {
-            "log_levels": ["DEBUG", "INFO"],
-            "log_dir": "/custom/log/dir",
-            "log_file_formats": {
-                "DEBUG": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-            },
-            "console_level": "WARNING",
-            "console_format": "%(levelname)s - %(message)s",
-            "colorize_console": True,
-            "log_date_format": "%Y-%m-%d",
-            "log_email_notifier_enable": True,
-            "log_email_notifier_log_levels": ["ERROR", None],
-            "log_email_notifier_log_format": "custom_format",
-            "log_email_notifier": {
-                "ENABLE": True,
-                "NOTIFY_ERROR": True,
-                "NOTIFY_CRITICAL": False,
-                "LOG_FORMAT": "custom_format",
-            },
-        }
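+        # drop the nested "log_settings" payload returned with extra_info=True;
+        # it is not reproduced in expected_extra below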
+        result.pop("log_settings")
+
+        expected_extra = expected.copy()
+        expected_extra.update(
+            {
+                "log_email_notifier": {
+                    "ENABLE": True,
+                    "NOTIFY_ERROR": True,
+                    "NOTIFY_CRITICAL": False,
+                    "LOG_FORMAT": "custom_format",
+                },
+            }
+        )
 
         assert result == expected_extra
 
@@ -152,3 +159,18 @@ def test_is_initialization_message_enabled(self, mock_settings: Dict) -> None:
         mock_settings["DJANGO_LOGGING"]["INITIALIZATION_MESSAGE_ENABLE"] = False
         with patch.object(settings, "DJANGO_LOGGING", mock_settings["DJANGO_LOGGING"]):
             assert is_initialization_message_enabled() is False
+
+    def test_logging_settings_none(self) -> None:
+        """
+        Test that `get_config` raises `ValueError` when logging settings is None.
+
+        This test verifies that when logging settings (DJANGO_LOGGING) is None,
+        the `get_config` function raises `ValueError`.
+
+        Asserts:
+        -------
+        - ValueError raised by `check_logging_settings`.
+        """
+        settings.DJANGO_LOGGING = None
+        with pytest.raises(ValueError, match="DJANGO_LOGGING must be a dictionary with configs as keys"):
+            get_config()
diff --git a/django_logging/tests/utils/test_log_and_notify.py b/django_logging/tests/utils/test_log_and_notify.py
index b2262c4..709bb84 100644
--- a/django_logging/tests/utils/test_log_and_notify.py
+++ b/django_logging/tests/utils/test_log_and_notify.py
@@ -6,8 +6,8 @@
 import pytest
 from django.conf import settings
 
-from django_logging.utils.log_email_notifier.log_and_notify import log_and_notify_admin
 from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+from django_logging.utils.log_email_notifier.log_and_notify import log_and_notify_admin
 
 pytestmark = [
     pytest.mark.utils,
@@ -94,6 +94,8 @@ def test_log_and_notify_admin_success(
             args=None,
             exc_info=None,
         )
+        # assign the custom 'context' attribute on the record
+        log_record.context = ""
 
         with patch(
             "django_logging.utils.log_email_notifier.log_and_notify.get_config",
@@ -210,6 +212,9 @@ def test_log_and_notify_admin_missing_admin_email(
             exc_info=None,
         )
 
+        # assign the custom 'context' attribute on the record
+        log_record.context = ""
+
         with patch(
             "django_logging.utils.log_email_notifier.log_and_notify.get_config",
             return_value=self.mock_log_config(True),
diff --git a/django_logging/tests/utils/test_process_file.py b/django_logging/tests/utils/test_process_file.py
new file mode 100644
index 0000000..850018a
--- /dev/null
+++ b/django_logging/tests/utils/test_process_file.py
@@ -0,0 +1,56 @@
+import os
+import sys
+from unittest.mock import Mock, patch
+
+import pytest
+
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+from django_logging.utils.command.process_file import setup_directories
+
+pytestmark = [
+    pytest.mark.utils,
+    pytest.mark.utils_process_file,
+    pytest.mark.skipif(sys.version_info < PYTHON_VERSION, reason=PYTHON_VERSION_REASON),
+]
+
+
+class TestProcessFile:
+    """
+    Test suite for the process_file module focusing on FileNotFoundError.
+    """
+
+    @patch("os.path.exists", return_value=False)
+    def test_log_directory_not_found(self, mock_exists: Mock) -> None:
+        """
+        Test if setup_directories raises FileNotFoundError when the log directory does not exist.
+
+        Args:
+        ----
+            mock_exists (Mock): Mock for os.path.exists.
+        """
+        log_dir = "/non/existent/log_dir"
+        sub_dir = "sub_dir"
+
+        with pytest.raises(FileNotFoundError, match="does not exist"):
+            setup_directories(log_dir, sub_dir)
+
+        mock_exists.assert_called_once_with(log_dir)
+
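+    # Both tests assume setup_directories opens with guards roughly like this
+    # (a sketch; the real message wording may differ):
+    #
+    #   if not os.path.exists(log_dir):
+    #       raise FileNotFoundError(f"{log_dir} does not exist.")
+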
+    @patch("os.path.exists", side_effect=[True, False])
+    def test_sub_directory_not_found(self, mock_exists: Mock) -> None:
+        """
+        Test if setup_directories raises FileNotFoundError when the subdirectory does not exist.
+
+        Args:
+        ----
+            mock_exists (Mock): Mock for os.path.exists.
+        """
+        log_dir = "/existent/log_dir"
+        sub_dir = "sub_dir"
+        sub_directory = os.path.join(log_dir, sub_dir)
+
+        with pytest.raises(FileNotFoundError, match="does not exist"):
+            setup_directories(log_dir, sub_dir)
+
+        mock_exists.assert_any_call(log_dir)
+        mock_exists.assert_any_call(sub_directory)
diff --git a/django_logging/tests/utils/test_set_conf.py b/django_logging/tests/utils/test_set_conf.py
index 900dcbd..8c986ba 100644
--- a/django_logging/tests/utils/test_set_conf.py
+++ b/django_logging/tests/utils/test_set_conf.py
@@ -5,8 +5,8 @@
 import pytest
 
 from django_logging.constants.ansi_colors import AnsiColors
-from django_logging.utils.set_conf import set_config
 from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+from django_logging.utils.set_conf import set_config
 
 pytestmark = [
     pytest.mark.utils,
@@ -61,6 +61,8 @@ def test_set_config_success(
             ["DEBUG", "INFO"],
             "/path/to/logs",
             {"DEBUG": 1, "INFO": 2},
+            {"DEBUG": "XML", "INFO": "JSON"},
+            {"DEBUG": False, "INFO": True},
             "DEBUG",
             1,
             True,
@@ -79,6 +81,8 @@ def test_set_config_success(
             ["DEBUG", "INFO"],
             "/path/to/logs",
             {"DEBUG": 1, "INFO": 2},
+            {"DEBUG": "XML", "INFO": "JSON"},
+            {"DEBUG": False, "INFO": True},
             "DEBUG",
             1,
             True,
@@ -93,6 +97,8 @@ def test_set_config_success(
             ["DEBUG", "INFO"],
             "/path/to/logs",
             {"DEBUG": 1, "INFO": 2},
+            {"DEBUG": "XML", "INFO": "JSON"},
+            {"DEBUG": False, "INFO": True},
             "DEBUG",
             1,
             True,
@@ -114,6 +120,8 @@ def test_set_config_success(
             ["DEBUG", "INFO"],
             "/path/to/logs",
             {"DEBUG": 1, "INFO": 2},
+            {"DEBUG": "XML", "INFO": "JSON"},
+            {"DEBUG": False, "INFO": True},
             "DEBUG",
             1,
             True,
@@ -156,6 +164,8 @@ def test_set_config_auto_initialization_disabled(
             ["DEBUG", "INFO"],
             "/path/to/logs",
             {"DEBUG": 1, "INFO": 2},
+            {"DEBUG": "XML", "INFO": "JSON"},
+            {"DEBUG": False, "INFO": True},
             "DEBUG",
             1,
             True,
@@ -203,6 +213,8 @@ def test_set_config_exception_handling(
                 ["DEBUG", "INFO"],
                 "/path/to/logs",
                 {"DEBUG": 1, "INFO": 2},
+                {"DEBUG": "XML", "INFO": "JSON"},
+                {"DEBUG": False, "INFO": True},
                 "DEBUG",
                 1,
                 True,
diff --git a/django_logging/tests/validators/test_config_validators.py b/django_logging/tests/validators/test_config_validators.py
index ac66b74..b93924c 100644
--- a/django_logging/tests/validators/test_config_validators.py
+++ b/django_logging/tests/validators/test_config_validators.py
@@ -4,16 +4,18 @@
 import pytest
 
 from django_logging.constants.config_types import LogLevels
+from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
 from django_logging.validators.config_validators import (
     validate_boolean_setting,
     validate_date_format,
     validate_directory,
     validate_email_notifier,
+    validate_extra_log_files,
     validate_format_option,
     validate_format_string,
+    validate_log_file_format_types,
     validate_log_levels,
 )
-from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
 
 pytestmark = [
     pytest.mark.validators,
@@ -240,7 +242,7 @@ def test_validate_format_option_failure(self) -> None:
         -------
         - Appropriate errors are returned for invalid format options.
         """
-        format_option = 15
+        format_option = 25
         errors = validate_format_option(format_option, "log_format_option")
         assert len(errors) == 1
         assert errors[0].id == "django_logging.E012_log_format_option"
@@ -379,3 +381,95 @@ def test_validate_email_notifier_invalid_type(self) -> None:
         errors = validate_email_notifier(notifier_config)  # type: ignore
         assert len(errors) == 1
         assert errors[0].id == "django_logging.E018_LOG_EMAIL_NOTIFIER['ENABLE']"
+
+    def test_validate_log_file_format_types_invalid_type(self) -> None:
+        """
+        Test validation of log file format types with an invalid data type.
+
+        Asserts:
+        -------
+            - Errors are returned when format types are not a dictionary.
+        """
+        errors = validate_log_file_format_types(
+            "INVALID_TYPE", "log_file_format_types",  # type: ignore
+            ["DEBUG", "INFO"], ["JSON", "XML"]
+        )
+        assert len(errors) == 1
+        assert errors[0].id == "django_logging.E022_log_file_format_types"
+
+    def test_validate_log_file_format_types_invalid_level_and_format(self) -> None:
+        """
+        Test validation of log file format types with invalid log levels and formats.
+
+        Asserts:
+        -------
+            - Errors are returned for invalid log levels.
+            - Errors are returned for invalid format types.
+        """
+        errors = validate_log_file_format_types(
+            {"INVALID_LEVEL": "JSON", "DEBUG": "INVALID_FORMAT"},
+            "log_file_format_types",
+            ["DEBUG", "INFO", "WARNING"],
+            ["JSON", "XML"],
+        )
+        assert len(errors) == 2
+        assert errors[0].id == "django_logging.E023_log_file_format_types"
+        assert errors[1].id == "django_logging.E024_log_file_format_types"
+
+    def test_validate_extra_log_files_invalid_type(self) -> None:
+        """
+        Test validation of extra log files with an invalid data type.
+
+        Asserts:
+        -------
+            - Errors are returned when extra log files are not a dictionary.
+        """
+        errors = validate_extra_log_files(
+            "INVALID_TYPE", "extra_log_files", ["DEBUG", "INFO"]  # type: ignore
+        )
+        assert len(errors) == 1
+        assert errors[0].id == "django_logging.E025_extra_log_files"
+
+    def test_validate_extra_log_files_invalid_level_and_value(self) -> None:
+        """
+        Test validation of extra log files with invalid log levels and values.
+
+        Asserts:
+        -------
+            - Errors are returned for invalid log levels.
+            - Errors are returned for non-boolean values.
+        """
+        errors = validate_extra_log_files(
+            {"INVALID_LEVEL": True, "DEBUG": "INVALID_VALUE"},
+            "extra_log_files",
+            ["DEBUG", "INFO", "WARNING"],
+        )
+        assert len(errors) == 2
+        assert errors[0].id == "django_logging.E026_extra_log_files"
+        assert errors[1].id == "django_logging.E027_extra_log_files"
diff --git a/django_logging/tests/validators/test_email_settings_validator.py b/django_logging/tests/validators/test_email_settings_validator.py
index 6064220..ace0bbd 100644
--- a/django_logging/tests/validators/test_email_settings_validator.py
+++ b/django_logging/tests/validators/test_email_settings_validator.py
@@ -5,8 +5,8 @@
 import pytest
 from django.conf import settings
 
-from django_logging.validators.email_settings_validator import check_email_settings
 from django_logging.tests.constants import PYTHON_VERSION, PYTHON_VERSION_REASON
+from django_logging.validators.email_settings_validator import check_email_settings
 
 pytestmark = [
     pytest.mark.validators,