diff --git a/config.json b/config.json
index 1e797f1..324f179 100644
--- a/config.json
+++ b/config.json
@@ -64,7 +64,8 @@
                 "50"
             ],
             "array_expansion": 5,
-            "init_max_depth": 4
+            "init_max_depth": 4,
+            "partial_equivalence_check": "deep"
         }
     }
 }
diff --git a/esbmc_ai_lib/config.py b/esbmc_ai_lib/config.py
index e9b8d1a..253fd98 100644
--- a/esbmc_ai_lib/config.py
+++ b/esbmc_ai_lib/config.py
@@ -3,7 +3,7 @@
 import os
 import json
 import sys
-from typing import Any, NamedTuple, Union
+from typing import Any, NamedTuple, Union, Literal
 
 from dotenv import load_dotenv
 from .logging import *
@@ -62,7 +62,8 @@ class ChatPromptSettings(NamedTuple):
 """Used for allocating continuous memory. Arrays and pointers will be initialized using this."""
 ocm_init_max_depth: int
 """Max depth that structs will be initialized into, afterwards initializes with NULL."""
-
+ocm_partial_equivalence_check: Literal["basic", "deep"] = "basic"
+"""Mode to check for partial equivalence on the return value."""
 
 def _load_custom_ai(config: dict) -> None:
     ai_custom: dict = config
@@ -248,6 +249,13 @@ def load_config(file_path: str) -> None:
         default=10,
     )
 
+    global ocm_partial_equivalence_check
+    ocm_partial_equivalence_check, _ = _load_config_value(
+        config_file=config_file["chat_modes"]["optimize_code"],
+        name="partial_equivalence_check",
+        default="basic",
+    )
+
     # Load the custom ai configs.
     _load_custom_ai(config_file["ai_custom"])
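
The new `partial_equivalence_check` option is annotated as `Literal["basic", "deep"]` but read straight from `config.json`, and `Literal` annotations are only checked statically, not at runtime. Below is a minimal sketch of how the loaded string could be validated against the annotation's choices; the `validate_mode` helper and its warning message are hypothetical and not part of this change.

```python
from typing import Literal, get_args

# Hypothetical illustration (not part of the diff): Literal is not enforced at
# runtime, so a config value outside {"basic", "deep"} would be stored silently.
PartialEquivalenceMode = Literal["basic", "deep"]


def validate_mode(value: str, default: str = "basic") -> str:
    """Return value if it is an allowed mode, otherwise fall back to default."""
    allowed = get_args(PartialEquivalenceMode)  # ("basic", "deep")
    if value not in allowed:
        print(f'Warning: unknown partial_equivalence_check "{value}", using "{default}"')
        return default
    return value


print(validate_mode("deep"))     # deep
print(validate_mode("shallow"))  # prints a warning, returns "basic"
```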