diff --git a/circuits/parser_http_request/language.circom b/circuits/http/parser_request/language.circom similarity index 100% rename from circuits/parser_http_request/language.circom rename to circuits/http/parser_request/language.circom diff --git a/circuits/parser_http_request/machine.circom b/circuits/http/parser_request/machine.circom similarity index 100% rename from circuits/parser_http_request/machine.circom rename to circuits/http/parser_request/machine.circom diff --git a/circuits/parser_http_request/parser.circom b/circuits/http/parser_request/parser.circom similarity index 100% rename from circuits/parser_http_request/parser.circom rename to circuits/http/parser_request/parser.circom diff --git a/circuits/interpreter.circom b/circuits/json/interpreter.circom similarity index 95% rename from circuits/interpreter.circom rename to circuits/json/interpreter.circom index 1a8c3d6..5a789cb 100644 --- a/circuits/interpreter.circom +++ b/circuits/json/interpreter.circom @@ -1,10 +1,9 @@ pragma circom 2.1.9; -include "extract.circom"; -include "parser.circom"; -include "language.circom"; -include "search.circom"; -include "./utils/array.circom"; +include "./parser/parser.circom"; +include "./parser/language.circom"; +include "../utils/search.circom"; +include "../utils/array.circom"; include "circomlib/circuits/mux1.circom"; include "circomlib/circuits/gates.circom"; include "@zk-email/circuits/utils/functions.circom"; @@ -179,7 +178,9 @@ template NextKVPair(n) { signal currentVal[2] <== topOfStack.value; signal isNextPair <== IsEqualArray(2)([currentVal, [1, 0]]); - signal isComma <== IsEqual()([currByte, 44]); // `, -> 44` + + component syntax = Syntax(); + signal isComma <== IsEqual()([currByte, syntax.COMMA]); // `, -> 44` out <== isNextPair*isComma ; } @@ -212,7 +213,8 @@ template NextKVPairAtDepth(n, depth) { signal isNextPair <== IsEqualArray(2)([currentVal, [1, 0]]); // `, -> 44` - signal isComma <== IsEqual()([currByte, 44]); + component syntax = 
Syntax(); + signal isComma <== IsEqual()([currByte, syntax.COMMA]); // pointer <= depth signal atLessDepth <== LessEqThan(logMaxDepth)([pointer, depth]); // current depth is less than key depth @@ -243,11 +245,13 @@ template KeyMatch(dataLen, keyLen) { signal input index; signal input parsing_key; + component syntax = Syntax(); + signal end_of_key <== IndexSelector(dataLen)(data, index + keyLen); - signal is_end_of_key_equal_to_quote <== IsEqual()([end_of_key, 34]); + signal is_end_of_key_equal_to_quote <== IsEqual()([end_of_key, syntax.QUOTE]); signal start_of_key <== IndexSelector(dataLen)(data, index - 1); - signal is_start_of_key_equal_to_quote <== IsEqual()([start_of_key, 34]); + signal is_start_of_key_equal_to_quote <== IsEqual()([start_of_key, syntax.QUOTE]); signal substring_match <== SubstringMatchWithIndex(dataLen, keyLen)(data, key, r, index); @@ -287,13 +291,15 @@ template KeyMatchAtDepth(dataLen, n, keyLen, depth) { topOfStack.stack <== stack; signal pointer <== topOfStack.pointer; + component syntax = Syntax(); + // end of key equals `"` signal end_of_key <== IndexSelector(dataLen)(data, index + keyLen); - signal is_end_of_key_equal_to_quote <== IsEqual()([end_of_key, 34]); + signal is_end_of_key_equal_to_quote <== IsEqual()([end_of_key, syntax.QUOTE]); // start of key equals `"` signal start_of_key <== IndexSelector(dataLen)(data, index - 1); - signal is_start_of_key_equal_to_quote <== IsEqual()([start_of_key, 34]); + signal is_start_of_key_equal_to_quote <== IsEqual()([start_of_key, syntax.QUOTE]); // key matches signal substring_match <== SubstringMatchWithIndex(dataLen, keyLen)(data, key, r, index); diff --git a/circuits/parser_json/language.circom b/circuits/json/parser/language.circom similarity index 100% rename from circuits/parser_json/language.circom rename to circuits/json/parser/language.circom diff --git a/circuits/parser_json/machine.circom b/circuits/json/parser/machine.circom similarity index 99% rename from 
circuits/parser_json/machine.circom rename to circuits/json/parser/machine.circom index 9fda500..78a38c6 100644 --- a/circuits/parser_json/machine.circom +++ b/circuits/json/parser/machine.circom @@ -23,9 +23,9 @@ Tests for this module are located in the files: `circuits/test/parser/*.test.ts pragma circom 2.1.9; -include "../utils/array.circom"; -include "../utils/bytes.circom"; -include "../utils/operators.circom"; +include "../../utils/array.circom"; +include "../../utils/bytes.circom"; +include "../../utils/operators.circom"; include "language.circom"; /* diff --git a/circuits/parser_json/parser.circom b/circuits/json/parser/parser.circom similarity index 98% rename from circuits/parser_json/parser.circom rename to circuits/json/parser/parser.circom index f3e4b51..c3cc7e8 100644 --- a/circuits/parser_json/parser.circom +++ b/circuits/json/parser/parser.circom @@ -1,6 +1,6 @@ pragma circom 2.1.9; -include "../utils/bytes.circom"; +include "../../utils/bytes.circom"; include "machine.circom"; template Parser(DATA_BYTES, MAX_STACK_HEIGHT) { diff --git a/circuits/test/common/index.ts b/circuits/test/common/index.ts index f78729b..6206f23 100644 --- a/circuits/test/common/index.ts +++ b/circuits/test/common/index.ts @@ -1,4 +1,6 @@ import 'mocha'; +import { readFileSync } from "fs"; +import { join } from "path"; import { Circomkit, WitnessTester } from "circomkit"; export const circomkit = new Circomkit({ @@ -21,4 +23,36 @@ export function generateDescription(input: any): string { return Object.entries(input) .map(([key, value]) => `${key} = ${stringifyValue(value)}`) .join(", "); +} + +export function readInputFile(filename: string, key: any[]): [number[], number[][], number[]] { + const valueStringPath = join(__dirname, "..", "..", "..", "examples", "json", "test", filename); + + let input: number[] = []; + let output: number[] = []; + + let data = readFileSync(valueStringPath, 'utf-8'); + + let keyUnicode: number[][] = []; + for (let i = 0; i < key.length; i++) { 
+ keyUnicode[i] = []; + let key_string = key[i].toString(); + for (let j = 0; j < key_string.length; j++) { + keyUnicode[i].push(key_string.charCodeAt(j)); + } + } + + const byteArray = []; + for (let i = 0; i < data.length; i++) { + byteArray.push(data.charCodeAt(i)); + } + input = byteArray; + + let jsonFile = JSON.parse(data); + let value: string = key.reduce((acc, key) => acc && acc[key], jsonFile).toString(); + for (let i = 0; i < value.length; i++) { + output.push(value.charCodeAt(i)); + } + + return [input, keyUnicode, output]; } \ No newline at end of file diff --git a/circuits/test/extractor/extractor.test.ts b/circuits/test/json/extractor/extractor.test.ts similarity index 84% rename from circuits/test/extractor/extractor.test.ts rename to circuits/test/json/extractor/extractor.test.ts index f5144cd..2a693ec 100644 --- a/circuits/test/extractor/extractor.test.ts +++ b/circuits/test/json/extractor/extractor.test.ts @@ -1,45 +1,13 @@ -import { circomkit, WitnessTester } from "../common"; -import { readFileSync } from "fs"; +import { circomkit, WitnessTester, readInputFile } from "../../common"; import { join } from "path"; import { spawn } from "child_process"; -export function readInputFile(filename: string, key: any[]): [number[], number[][], number[]] { - const value_string_path = join(__dirname, "..", "..", "..", "json_examples", "test", filename); - let input: number[] = []; - let output: number[] = []; - - let data = readFileSync(value_string_path, 'utf-8'); - - let keyUnicode: number[][] = []; - for (let i = 0; i < key.length; i++) { - keyUnicode[i] = []; - let key_string = key[i].toString(); - for (let j = 0; j < key_string.length; j++) { - keyUnicode[i].push(key_string.charCodeAt(j)); - } - } - - const byteArray = []; - for (let i = 0; i < data.length; i++) { - byteArray.push(data.charCodeAt(i)); - } - input = byteArray; - - let jsonFile = JSON.parse(data); - let value: string = key.reduce((acc, key) => acc && acc[key], jsonFile).toString(); - for 
(let i = 0; i < value.length; i++) { - output.push(value.charCodeAt(i)); - } - - return [input, keyUnicode, output]; -} - -function executeCodegen(input_file_name: string, output_filename: string) { +function executeCodegen(inputFilename: string, outputFilename: string) { return new Promise((resolve, reject) => { - const input_path = join(__dirname, "..", "..", "..", "json_examples", "codegen", input_file_name); + const inputPath = join(__dirname, "..", "..", "..", "..", "examples", "json", "test", "codegen", inputFilename); - const codegen = spawn("cargo", ["run", "--bin", "codegen", "--", "--json-file", input_path, "--output-filename", output_filename]); + const codegen = spawn("cargo", ["run", "--bin", "codegen", "--", "--json-file", inputPath, "--output-filename", outputFilename]); codegen.stdout.on('data', (data) => { console.log(`stdout: ${data}`); diff --git a/circuits/test/extractor/interpreter.test.ts b/circuits/test/json/extractor/interpreter.test.ts similarity index 96% rename from circuits/test/extractor/interpreter.test.ts rename to circuits/test/json/extractor/interpreter.test.ts index 89894c6..9ee8953 100644 --- a/circuits/test/extractor/interpreter.test.ts +++ b/circuits/test/json/extractor/interpreter.test.ts @@ -1,6 +1,5 @@ -import { circomkit, WitnessTester, generateDescription } from "../common"; -import { PoseidonModular } from "../common/poseidon"; -import { readInputFile } from "./extractor.test"; +import { circomkit, WitnessTester, generateDescription, readInputFile } from "../../common"; +import { PoseidonModular } from "../../common/poseidon"; describe("Interpreter", async () => { describe("InsideKey", async () => { @@ -8,7 +7,7 @@ describe("Interpreter", async () => { before(async () => { circuit = await circomkit.WitnessTester(`InsideKey`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "InsideKey", params: [4], }); @@ -47,7 +46,7 @@ describe("Interpreter", async () => { before(async () => { circuit = 
await circomkit.WitnessTester(`InsideValue`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "InsideValue", params: [4], }); @@ -89,7 +88,7 @@ describe("Interpreter", async () => { it(`(valid) witness: ${description} ${desc}`, async () => { circuit = await circomkit.WitnessTester(`InsideValueAtDepth`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "InsideValueAtDepth", params: [4, depth], }); @@ -126,7 +125,7 @@ describe("Interpreter", async () => { it(`(valid) witness: ${description} ${desc}`, async () => { circuit = await circomkit.WitnessTester(`InsideArrayIndex`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "InsideArrayIndex", params: [4, index], }); @@ -166,7 +165,7 @@ describe("Interpreter", async () => { it(`(valid) witness: ${description} ${desc}`, async () => { circuit = await circomkit.WitnessTester(`InsideArrayIndexAtDepth`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "InsideArrayIndexAtDepth", params: [4, index, depth], }); @@ -200,7 +199,7 @@ describe("Interpreter", async () => { before(async () => { circuit = await circomkit.WitnessTester(`NextKVPair`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "NextKVPair", params: [4], }); @@ -240,7 +239,7 @@ describe("Interpreter", async () => { it(`(valid) witness: ${description} ${desc}`, async () => { circuit = await circomkit.WitnessTester(`NextKVPairAtDepth`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "NextKVPairAtDepth", params: [4, depth], }); @@ -274,7 +273,7 @@ describe("Interpreter", async () => { it(`(valid) witness: ${description} ${desc}`, async () => { circuit = await circomkit.WitnessTester(`KeyMatch`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "KeyMatch", params: [input.data.length, input.key.length], }); @@ -315,7 +314,7 @@ describe("Interpreter", 
async () => { it(`(valid) witness: ${description} ${desc}`, async () => { circuit = await circomkit.WitnessTester(`KeyMatchAtDepth`, { - file: "circuits/interpreter", + file: "circuits/json/interpreter", template: "KeyMatchAtDepth", params: [input.data.length, 4, input.key.length, depth], }); diff --git a/circuits/test/parser_json/index.ts b/circuits/test/json/parser/index.ts similarity index 100% rename from circuits/test/parser_json/index.ts rename to circuits/test/json/parser/index.ts diff --git a/circuits/test/parser_json/parsing_types.test.ts b/circuits/test/json/parser/parsing_types.test.ts similarity index 98% rename from circuits/test/parser_json/parsing_types.test.ts rename to circuits/test/json/parser/parsing_types.test.ts index fce76bf..a5783dc 100644 --- a/circuits/test/parser_json/parsing_types.test.ts +++ b/circuits/test/json/parser/parsing_types.test.ts @@ -1,4 +1,4 @@ -import { circomkit, WitnessTester, generateDescription } from "../common"; +import { circomkit, WitnessTester, generateDescription } from "../../common"; import { Delimiters, WhiteSpace, Numbers, Escape, INITIAL_IN, INITIAL_OUT } from '.'; @@ -19,7 +19,7 @@ describe("StateUpdate", () => { before(async () => { circuit = await circomkit.WitnessTester(`StateUpdate`, { - file: "circuits/parser_json/machine", + file: "circuits/json/parser/machine", template: "StateUpdate", params: [4], }); @@ -65,7 +65,7 @@ describe("StateUpdate", () => { // init: stack == [[1, 0], [0, 0], [0, 0], [0, 0]] // read: `"` // expect: parsing_string --> 0 - // + // let in_key_to_exit = { ...INITIAL_IN }; in_key_to_exit.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; in_key_to_exit.parsing_string = 1 diff --git a/circuits/test/parser_json/stack.test.ts b/circuits/test/json/parser/stack.test.ts similarity index 98% rename from circuits/test/parser_json/stack.test.ts rename to circuits/test/json/parser/stack.test.ts index a8b151f..f719582 100644 --- a/circuits/test/parser_json/stack.test.ts +++ 
b/circuits/test/json/parser/stack.test.ts @@ -1,11 +1,11 @@ -import { circomkit, WitnessTester, generateDescription } from "../common"; +import { circomkit, WitnessTester, generateDescription } from "../../common"; import { Delimiters, WhiteSpace, Numbers, Escape, INITIAL_IN, INITIAL_OUT } from '.'; describe("GetTopOfStack", () => { let circuit: WitnessTester<["stack"], ["value", "pointer"]>; before(async () => { circuit = await circomkit.WitnessTester(`GetTopOfStack`, { - file: "circuits/parser_json/machine", + file: "circuits/json/parser/machine", template: "GetTopOfStack", params: [4], }); @@ -34,7 +34,7 @@ describe("StateUpdate :: RewriteStack", () => { >; before(async () => { circuit = await circomkit.WitnessTester(`GetTopOfStack`, { - file: "circuits/parser_json/machine", + file: "circuits/json/parser/machine", template: "StateUpdate", params: [4], }); diff --git a/circuits/test/parser_json/values.test.ts b/circuits/test/json/parser/values.test.ts similarity index 99% rename from circuits/test/parser_json/values.test.ts rename to circuits/test/json/parser/values.test.ts index 6e89edd..2bca379 100644 --- a/circuits/test/parser_json/values.test.ts +++ b/circuits/test/json/parser/values.test.ts @@ -1,4 +1,4 @@ -import { circomkit, WitnessTester, generateDescription } from "../common"; +import { circomkit, WitnessTester, generateDescription } from "../../common"; import { Delimiters, WhiteSpace, Numbers, Escape, INITIAL_IN, INITIAL_OUT } from '.'; describe("StateUpdate :: Values", () => { @@ -8,7 +8,7 @@ describe("StateUpdate :: Values", () => { >; before(async () => { circuit = await circomkit.WitnessTester(`GetTopOfStack`, { - file: "circuits/parser_json/machine", + file: "circuits/json/parser/machine", template: "StateUpdate", params: [4], }); diff --git a/circuits/test/search.test.ts b/circuits/test/utils/search.test.ts similarity index 92% rename from circuits/test/search.test.ts rename to circuits/test/utils/search.test.ts index 39e8143..f0938db 100644 --- 
a/circuits/test/search.test.ts +++ b/circuits/test/utils/search.test.ts @@ -1,7 +1,7 @@ -import { circomkit, WitnessTester } from "./common"; +import { circomkit, WitnessTester } from "../common"; -import witness from "../../inputs/search/witness.json"; -import { PoseidonModular } from "./common/poseidon"; +import witness from "../../../inputs/search/witness.json"; +import { PoseidonModular } from "../common/poseidon"; describe("search", () => { describe("SubstringSearch", () => { @@ -14,7 +14,7 @@ describe("search", () => { const hashResult = PoseidonModular(concatenatedInput); circuit = await circomkit.WitnessTester(`SubstringSearch`, { - file: "circuits/search", + file: "circuits/utils/search", template: "SubstringSearch", params: [data.length, key.length], }); @@ -32,7 +32,7 @@ describe("search", () => { const hashResult = PoseidonModular(concatenatedInput); circuit = await circomkit.WitnessTester(`SubstringSearch`, { - file: "circuits/search", + file: "circuits/utils/search", template: "SubstringSearch", params: [data.length, key.length], }); @@ -51,7 +51,7 @@ describe("search", () => { const key = [1, 0]; circuit = await circomkit.WitnessTester(`SubstringSearch`, { - file: "circuits/search", + file: "circuits/utils/search", template: "SubstringSearch", params: [data.length, key.length], }); @@ -67,7 +67,7 @@ describe("search", () => { const hashResult = PoseidonModular(concatenatedInput); circuit = await circomkit.WitnessTester(`SubstringSearch`, { - file: "circuits/search", + file: "circuits/utils/search", template: "SubstringSearch", params: [witness["data"].length, witness["key"].length], }); @@ -85,7 +85,7 @@ describe("search", () => { before(async () => { circuit = await circomkit.WitnessTester(`SubstringSearch`, { - file: "circuits/search", + file: "circuits/utils/search", template: "SubstringMatchWithIndex", params: [787, 10], }); @@ -122,7 +122,7 @@ describe("search", () => { before(async () => { circuit = await 
circomkit.WitnessTester(`SubstringSearch`, { - file: "circuits/search", + file: "circuits/utils/search", template: "SubstringMatch", params: [787, 10], }); diff --git a/circuits/search.circom b/circuits/utils/search.circom similarity index 98% rename from circuits/search.circom rename to circuits/utils/search.circom index 1799c01..b9bb86e 100644 --- a/circuits/search.circom +++ b/circuits/utils/search.circom @@ -1,9 +1,9 @@ pragma circom 2.1.9; include "circomlib/circuits/mux1.circom"; -include "./utils/hash.circom"; -include "./utils/operators.circom"; -include "./utils/array.circom"; +include "./hash.circom"; +include "./operators.circom"; +include "./array.circom"; /* SubstringSearch diff --git a/json_examples/codegen/two_keys.json b/examples/json/test/codegen/two_keys.json similarity index 100% rename from json_examples/codegen/two_keys.json rename to examples/json/test/codegen/two_keys.json diff --git a/json_examples/codegen/value_array_nested.json b/examples/json/test/codegen/value_array_nested.json similarity index 100% rename from json_examples/codegen/value_array_nested.json rename to examples/json/test/codegen/value_array_nested.json diff --git a/json_examples/codegen/value_array_number.json b/examples/json/test/codegen/value_array_number.json similarity index 100% rename from json_examples/codegen/value_array_number.json rename to examples/json/test/codegen/value_array_number.json diff --git a/json_examples/codegen/value_array_object.json b/examples/json/test/codegen/value_array_object.json similarity index 100% rename from json_examples/codegen/value_array_object.json rename to examples/json/test/codegen/value_array_object.json diff --git a/json_examples/codegen/value_array_string.json b/examples/json/test/codegen/value_array_string.json similarity index 100% rename from json_examples/codegen/value_array_string.json rename to examples/json/test/codegen/value_array_string.json diff --git a/json_examples/codegen/value_number.json 
b/examples/json/test/codegen/value_number.json similarity index 100% rename from json_examples/codegen/value_number.json rename to examples/json/test/codegen/value_number.json diff --git a/json_examples/codegen/value_object.json b/examples/json/test/codegen/value_object.json similarity index 100% rename from json_examples/codegen/value_object.json rename to examples/json/test/codegen/value_object.json diff --git a/json_examples/codegen/value_string.json b/examples/json/test/codegen/value_string.json similarity index 100% rename from json_examples/codegen/value_string.json rename to examples/json/test/codegen/value_string.json diff --git a/src/bin/codegen.rs b/src/bin/codegen.rs index 3ff2acf..ed5f55f 100644 --- a/src/bin/codegen.rs +++ b/src/bin/codegen.rs @@ -1,8 +1,6 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; use std::fs; -use std::iter::Map; use std::path::PathBuf; use std::str::FromStr; @@ -24,10 +22,6 @@ enum ValueType { String, #[serde(rename = "number")] Number, - #[serde(skip_deserializing)] - Array, - #[serde(skip_deserializing)] - ArrayElement, } #[derive(Debug, Serialize, Deserialize)] @@ -93,7 +87,6 @@ fn extract_string(data: Data, cfb: &mut String) { *cfb += r#" log("value_starting_index", value_starting_index[DATA_BYTES-2]); - // TODO: why +1 not required here,when required on all other string implss? 
value <== SelectSubArray(DATA_BYTES, maxValueLen)(data, value_starting_index[DATA_BYTES-2]+1, maxValueLen); for (var i=0 ; i Result<(), Box> { - let mut num_string_keys = 0; - let mut string_key_index_map = HashMap::new(); - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => { - string_key_index_map.insert(num_string_keys, i + 1); - num_string_keys += 1; - } - Key::Num(_) => (), - } - } - let mut cfb = String::new(); cfb += PRAGMA; - cfb += "include \"../interpreter.circom\";\n\n"; + cfb += "include \"../json/interpreter.circom\";\n\n"; - cfb += "template ExtractValue2(DATA_BYTES, MAX_STACK_HEIGHT, "; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => cfb += &format!("keyLen{}, depth{}, ", i + 1, i + 1), - Key::Num(_) => cfb += &format!("index{}, depth{}, ", i + 1, i + 1), + // template ExtractValue2(DATA_BYTES, MAX_STACK_HEIGHT, keyLen1, depth1, index2, depth2, keyLen3, depth3, index4, depth4, maxValueLen) { + { + cfb += "template ExtractValue2(DATA_BYTES, MAX_STACK_HEIGHT, "; + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => cfb += &format!("keyLen{}, depth{}, ", i + 1, i + 1), + Key::Num(_) => cfb += &format!("index{}, depth{}, ", i + 1, i + 1), + } } + cfb += "maxValueLen) {\n"; } - cfb += "maxValueLen) {\n"; - cfb += " signal input data[DATA_BYTES];\n\n"; + /* + signal input data[DATA_BYTES]; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => cfb += &format!(" signal input key{}[keyLen{}];\n", i + 1, i + 1), - Key::Num(_) => (), + signal input key1[keyLen1]; + signal input key3[keyLen3]; + */ + { + cfb += " signal input data[DATA_BYTES];\n\n"; + + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => { + cfb += &format!(" signal input key{}[keyLen{}];\n", i + 1, i + 1) + } + Key::Num(_) => (), + } } } @@ -221,29 +214,31 @@ fn parse_json_request( } signal r <== rHasher.out; */ - cfb += "\n // r must be secret, 
so either has to be derived from hash in the circuit or off the circuit\n component rHasher = PoseidonModular(DATA_BYTES + "; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => cfb += &format!(" keyLen{} +", i + 1), - Key::Num(_) => (), + { + cfb += "\n // r must be secret, so either has to be derived from hash in the circuit or off the circuit\n component rHasher = PoseidonModular(DATA_BYTES + "; + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => cfb += &format!(" keyLen{} +", i + 1), + Key::Num(_) => (), + } } - } - cfb.pop(); - cfb.pop(); - cfb += ");\n"; + cfb.pop(); + cfb.pop(); + cfb += ");\n"; - let mut key_len_counter_str = String::from_str("i")?; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => { - cfb += &format!(" for (var i = 0 ; i < keyLen{} ; i++) {{\n rHasher.in[{}] <== key{}[i];\n }}\n", i+1, key_len_counter_str, i+1); - key_len_counter_str += &format!(" + keyLen{}", i + 1); + let mut key_len_counter_str = String::from_str("i")?; + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => { + cfb += &format!(" for (var i = 0 ; i < keyLen{} ; i++) {{\n rHasher.in[{}] <== key{}[i];\n }}\n", i+1, key_len_counter_str, i+1); + key_len_counter_str += &format!(" + keyLen{}", i + 1); + } + Key::Num(_) => (), } - Key::Num(_) => (), } - } - cfb += &format!(" for (var i = 0 ; i < DATA_BYTES ; i++) {{\n rHasher.in[{}] <== data[i];\n }}\n", key_len_counter_str); + cfb += &format!(" for (var i = 0 ; i < DATA_BYTES ; i++) {{\n rHasher.in[{}] <== data[i];\n }}\n", key_len_counter_str); + } cfb += r#" signal r <== rHasher.out; @@ -267,22 +262,35 @@ fn parse_json_request( signal parsing_value[DATA_BYTES]; "#; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => { - cfb += &format!(" signal parsing_object{}_value[DATA_BYTES];\n", i + 1) + /* // signals for parsing string key and array index + signal 
parsing_key[DATA_BYTES]; + signal parsing_value[DATA_BYTES]; + signal parsing_object1_value[DATA_BYTES]; + signal parsing_array2[DATA_BYTES]; + signal is_key1_match[DATA_BYTES]; + signal is_key1_match_for_value[DATA_BYTES]; + is_key1_match_for_value[0] <== 0; + signal is_next_pair_at_depth1[DATA_BYTES]; + */ + { + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => { + cfb += &format!(" signal parsing_object{}_value[DATA_BYTES];\n", i + 1) + } + Key::Num(_) => cfb += &format!(" signal parsing_array{}[DATA_BYTES];\n", i + 1), + } - } - for (i, key) in data.keys.iter().enumerate() { - match key { + for (i, key) in data.keys.iter().enumerate() { + match key { Key::String(_) => cfb += &format!(" signal is_key{}_match[DATA_BYTES];\n signal is_key{}_match_for_value[DATA_BYTES];\n is_key{}_match_for_value[0] <== 0;\n signal is_next_pair_at_depth{}[DATA_BYTES];\n", i+1, i+1, i+1, i+1), Key::Num(_) => (), } + } } + // debugging cfb += r#" signal is_value_match[DATA_BYTES]; is_value_match[0] <== 0; @@ -310,81 +318,116 @@ fn parse_json_request( // - value_mask // - mask + // check if inside key or not parsing_key[data_idx-1] <== InsideKey(MAX_STACK_HEIGHT)(State[data_idx].stack, State[data_idx].parsing_string, State[data_idx].parsing_number); // log("parsing key:", parsing_key[data_idx]); "#; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => { - cfb += &format!(" parsing_object{}_value[data_idx-1] <== InsideValueAtDepth(MAX_STACK_HEIGHT, depth{})(State[data_idx].stack, State[data_idx].parsing_string, State[data_idx].parsing_number);\n", i+1, i+1); - } - Key::Num(_) => { - cfb += &format!(" parsing_array{}[data_idx-1] <== InsideArrayIndexAtDepth(MAX_STACK_HEIGHT, index{}, depth{})(State[data_idx].stack, State[data_idx].parsing_string, State[data_idx].parsing_number);\n", i+1, i+1, i+1); + /* Determining whether parsing correct value 
and array index + parsing_object1_value[data_idx-1] <== InsideValueAtDepth(MAX_STACK_HEIGHT, depth1)(State[data_idx].stack, State[data_idx].parsing_string, State[data_idx].parsing_number); + parsing_array2[data_idx-1] <== InsideArrayIndexAtDepth(MAX_STACK_HEIGHT, index2, depth2)(State[data_idx].stack, State[data_idx].parsing_string, State[data_idx].parsing_number); + */ + { + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => { + cfb += &format!(" parsing_object{}_value[data_idx-1] <== InsideValueAtDepth(MAX_STACK_HEIGHT, depth{})(State[data_idx].stack, State[data_idx].parsing_string, State[data_idx].parsing_number);\n", i+1, i+1); + } + Key::Num(_) => { + cfb += &format!(" parsing_array{}[data_idx-1] <== InsideArrayIndexAtDepth(MAX_STACK_HEIGHT, index{}, depth{})(State[data_idx].stack, State[data_idx].parsing_string, State[data_idx].parsing_number);\n", i+1, i+1, i+1); + } } } } - cfb += &format!( - " parsing_value[data_idx-1] <== MultiAND({})([", + // parsing correct value = AND(all individual stack values) + // parsing_value[data_idx-1] <== MultiAND(4)([parsing_object1_value[data_idx-1], parsing_array2[data_idx-1], parsing_object3_value[data_idx-1], parsing_array4[data_idx-1]]); + { + cfb += &format!( + " // parsing correct value = AND(all individual stack values)\n parsing_value[data_idx-1] <== MultiAND({})([", data.keys.len() ); - for (i, key) in data.keys.iter().take(data.keys.len() - 1).enumerate() { - match key { - Key::String(_) => cfb += &format!("parsing_object{}_value[data_idx-1], ", i + 1), - Key::Num(_) => cfb += &format!("parsing_array{}[data_idx-1], ", i + 1), + for (i, key) in data.keys.iter().take(data.keys.len() - 1).enumerate() { + match key { + Key::String(_) => cfb += &format!("parsing_object{}_value[data_idx-1], ", i + 1), + Key::Num(_) => cfb += &format!("parsing_array{}[data_idx-1], ", i + 1), + } } - } - match data.keys[data.keys.len() - 1] { - Key::String(_) => { - cfb += 
&format!("parsing_object{}_value[data_idx-1]]);\n", data.keys.len()) + match data.keys[data.keys.len() - 1] { + Key::String(_) => { + cfb += &format!("parsing_object{}_value[data_idx-1]]);\n", data.keys.len()) + } + Key::Num(_) => cfb += &format!("parsing_array{}[data_idx-1]]);\n", data.keys.len()), } - Key::Num(_) => cfb += &format!("parsing_array{}[data_idx-1]]);\n", data.keys.len()), - } - // optional debug logs - cfb += " // log(\"parsing value:\", "; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => cfb += &format!("parsing_object{}_value[data_idx-1], ", i + 1), - Key::Num(_) => cfb += &format!("parsing_array{}[data_idx-1], ", i + 1), + // optional debug logs + cfb += " // log(\"parsing value:\", "; + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => cfb += &format!("parsing_object{}_value[data_idx-1], ", i + 1), + Key::Num(_) => cfb += &format!("parsing_array{}[data_idx-1], ", i + 1), + } } + cfb += "parsing_value[data_idx-1]);\n\n"; } - cfb += "parsing_value[data_idx-1]);\n\n"; let mut num_objects = 0; - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => { - num_objects += 1; - cfb += &format!(" is_key{}_match[data_idx-1] <== KeyMatchAtDepth(DATA_BYTES, MAX_STACK_HEIGHT, keyLen{}, depth{})(data, key{}, r, data_idx-1, parsing_key[data_idx-1], State[data_idx].stack);\n", i+1, i+1, i+1, i+1); - cfb += &format!(" is_next_pair_at_depth{}[data_idx-1] <== NextKVPairAtDepth(MAX_STACK_HEIGHT, depth{})(State[data_idx].stack, data[data_idx-1]);\n", i+1, i+1); - cfb += &format!(" is_key{}_match_for_value[data_idx] <== Mux1()([is_key{}_match_for_value[data_idx-1] * (1-is_next_pair_at_depth{}[data_idx-1]), is_key{}_match[data_idx-1] * (1-is_next_pair_at_depth{}[data_idx-1])], is_key{}_match[data_idx-1]);\n", i+1, i+1, i+1, i+1, i+1, i+1); - cfb += &format!(" // log(\"is_key{}_match_for_value\", is_key{}_match_for_value[data_idx]);\n\n", i + 1, i + 1); + + /* + to get correct 
value, check: + - key matches at current index and depth of key is as specified + - whether next KV pair starts + - whether key matched for a value (propagate key match until new KV pair of lower depth starts) + is_key1_match[data_idx-1] <== KeyMatchAtDepth(DATA_BYTES, MAX_STACK_HEIGHT, keyLen1, depth1)(data, key1, r, data_idx-1, parsing_key[data_idx-1], State[data_idx].stack); + is_next_pair_at_depth1[data_idx-1] <== NextKVPairAtDepth(MAX_STACK_HEIGHT, depth1)(State[data_idx].stack, data[data_idx-1]); + is_key1_match_for_value[data_idx] <== Mux1()([is_key1_match_for_value[data_idx-1] * (1-is_next_pair_at_depth1[data_idx-1]), is_key1_match[data_idx-1] * (1-is_next_pair_at_depth1[data_idx-1])], is_key1_match[data_idx-1]); + */ + { + cfb += r#" + // to get correct value, check: + // - key matches at current index and depth of key is as specified + // - whether next KV pair starts + // - whether key matched for a value (propagate key match until new KV pair of lower depth starts) +"#; + + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => { + num_objects += 1; + cfb += &format!(" is_key{}_match[data_idx-1] <== KeyMatchAtDepth(DATA_BYTES, MAX_STACK_HEIGHT, keyLen{}, depth{})(data, key{}, r, data_idx-1, parsing_key[data_idx-1], State[data_idx].stack);\n", i+1, i+1, i+1, i+1); + cfb += &format!(" is_next_pair_at_depth{}[data_idx-1] <== NextKVPairAtDepth(MAX_STACK_HEIGHT, depth{})(State[data_idx].stack, data[data_idx-1]);\n", i+1, i+1); + cfb += &format!(" is_key{}_match_for_value[data_idx] <== Mux1()([is_key{}_match_for_value[data_idx-1] * (1-is_next_pair_at_depth{}[data_idx-1]), is_key{}_match[data_idx-1] * (1-is_next_pair_at_depth{}[data_idx-1])], is_key{}_match[data_idx-1]);\n", i+1, i+1, i+1, i+1, i+1, i+1); + cfb += &format!(" // log(\"is_key{}_match_for_value\", is_key{}_match_for_value[data_idx]);\n\n", i + 1, i + 1); + } + Key::Num(_) => (), } - Key::Num(_) => (), } } - cfb += &format!( - " is_value_match[data_idx] <== 
MultiAND({})([", - num_objects - ); - for (i, key) in data.keys.iter().enumerate() { - match key { - Key::String(_) => cfb += &format!("is_key{}_match_for_value[data_idx], ", i + 1), - Key::Num(_) => (), + // is_value_match[data_idx] <== MultiAND(2)([is_key1_match_for_value[data_idx], is_key3_match_for_value[data_idx]]); + { + cfb += &format!( + " is_value_match[data_idx] <== MultiAND({})([", + num_objects + ); + for (i, key) in data.keys.iter().enumerate() { + match key { + Key::String(_) => cfb += &format!("is_key{}_match_for_value[data_idx], ", i + 1), + Key::Num(_) => (), + } } - } - // remove last 2 chars `, ` from string buffer - cfb.pop(); - cfb.pop(); - cfb += "]);\n"; + // remove last 2 chars `, ` from string buffer + cfb.pop(); + cfb.pop(); + cfb += "]);\n"; + } - cfb += r#" // log("is_value_match", is_value_match[data_idx]); + // debugging and output bytes + { + cfb += r#" // log("is_value_match", is_value_match[data_idx]); // mask[i] = data[i] * parsing_value[i] * is_key_match_for_value[i] value_mask[data_idx-1] <== data[data_idx-1] * parsing_value[data_idx-1]; @@ -413,13 +456,13 @@ fn parse_json_request( } "#; - // template ends - cfb += "}\n"; + // template ends + cfb += "}\n"; + } match data.value_type { ValueType::String => extract_string(data, &mut cfb), ValueType::Number => extract_number(data, &mut cfb), - _ => unimplemented!(), } // write circuits to file