From 19d62c8d69840fa5b5680f103c3b45b20cd15234 Mon Sep 17 00:00:00 2001
From: Roman Bredehoft
Date: Fri, 26 Apr 2024 15:20:37 +0200
Subject: [PATCH] chore: enable input compression in llm use case

---
 use_case_examples/llm/qgpt2_class.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/use_case_examples/llm/qgpt2_class.py b/use_case_examples/llm/qgpt2_class.py
index 846068f7c..b06453d7b 100644
--- a/use_case_examples/llm/qgpt2_class.py
+++ b/use_case_examples/llm/qgpt2_class.py
@@ -183,7 +183,9 @@ def compile(self, configuration: Optional[Configuration] = None) -> Circuit:
         compiler = fhe.Compiler(self.run_numpy, {"q_inputs": "encrypted"})
 
         # Compile the circuit on the calibration quantized data
-        self.circuit = compiler.compile(inputset, configuration=configuration)
+        self.circuit = compiler.compile(
+            inputset, configuration=configuration, compress_input_ciphertexts=True
+        )
 
         # Print the maximum bit-width reached in the circuit
         print(