diff --git a/Makefile b/Makefile index 14dec9ae4e..f8c147654f 100644 --- a/Makefile +++ b/Makefile @@ -93,7 +93,6 @@ endif all: all-staged all-unstaged: compile-gcc-compatible compile-msvc-compatible compile-gcc64-only \ - compile-c89-compatible \ compile-portable-gcc-compatible \ dist/wasm/package.json dist/merkle-tree/Makefile.basic \ obj/libhaclml.cmxa compile-election-guard @@ -866,14 +865,6 @@ dist/msvc-compatible/Makefile.basic: DEFAULT_FLAGS += -falloca -ftail-calls dist/gcc64-only/Makefile.basic: DEFAULT_FLAGS += -fbuiltin-uint128 -# C89 distribution -# ---------------- -# -# - MerkleTree doesn't compile in C89 mode (FIXME?) -# - Use C89 versions of ancient HACL code -dist/c89-compatible/Makefile.basic: MERKLE_BUNDLE = -bundle 'MerkleTree.*,MerkleTree' -dist/c89-compatible/Makefile.basic: DEFAULT_FLAGS += -fc89 -ccopt -std=c89 -ccopt -Wno-typedef-redefinition - # Election Guard distribution # --------------------------- diff --git a/dist/c89-compatible/EverCrypt_AEAD.c b/dist/c89-compatible/EverCrypt_AEAD.c deleted file mode 100644 index 2c1187da87..0000000000 --- a/dist/c89-compatible/EverCrypt_AEAD.c +++ /dev/null @@ -1,2303 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "EverCrypt_AEAD.h" - -#include "internal/Vale.h" -#include "internal/Hacl_Spec.h" - -typedef struct EverCrypt_AEAD_state_s_s -{ - Spec_Cipher_Expansion_impl impl; - uint8_t *ek; -} -EverCrypt_AEAD_state_s; - -bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee) -{ - return true; -} - -Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s) -{ - EverCrypt_AEAD_state_s scrut = *s; - Spec_Cipher_Expansion_impl impl = scrut.impl; - switch (impl) - { - case Spec_Cipher_Expansion_Hacl_CHACHA20: - { - return Spec_Agile_AEAD_CHACHA20_POLY1305; - } - case Spec_Cipher_Expansion_Vale_AES128: - { - return Spec_Agile_AEAD_AES128_GCM; - } - case Spec_Cipher_Expansion_Vale_AES256: - { - return Spec_Agile_AEAD_AES256_GCM; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -static EverCrypt_Error_error_code -create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k) -{ - uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Hacl_CHACHA20; - lit.ek = ek; - KRML_CHECK_SIZE(sizeof (EverCrypt_AEAD_state_s), (uint32_t)1U); - { - EverCrypt_AEAD_state_s - *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s)); - p[0U] = lit; - memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t)); - dst[0U] = p; - return EverCrypt_Error_Success; - } -} - -static EverCrypt_Error_error_code -create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) -{ - bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); - bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); - bool has_avx = EverCrypt_AutoConfig2_has_avx(); - bool has_sse = EverCrypt_AutoConfig2_has_sse(); - bool has_movbe = EverCrypt_AutoConfig2_has_movbe(); - #if HACL_CAN_COMPILE_VALE - if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) - { - uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)480U, sizeof (uint8_t)); - uint8_t *keys_b = ek; - uint8_t *hkeys_b = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b); - uint64_t scrut0 = aes128_keyhash_init(keys_b, hkeys_b); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES128; - lit.ek = ek; - KRML_CHECK_SIZE(sizeof (EverCrypt_AEAD_state_s), (uint32_t)1U); - { - EverCrypt_AEAD_state_s - *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s)); - p[0U] = lit; - *dst = p; - return EverCrypt_Error_Success; - } - } - #endif - return EverCrypt_Error_UnsupportedAlgorithm; -} - -static EverCrypt_Error_error_code -create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) -{ - bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); - bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); - bool has_avx = EverCrypt_AutoConfig2_has_avx(); - bool has_sse = EverCrypt_AutoConfig2_has_sse(); - bool has_movbe = EverCrypt_AutoConfig2_has_movbe(); - #if HACL_CAN_COMPILE_VALE - if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) - { - uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)544U, sizeof (uint8_t)); - uint8_t *keys_b = ek; - uint8_t *hkeys_b = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b); - uint64_t scrut0 = aes256_keyhash_init(keys_b, hkeys_b); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES256; - lit.ek = ek; - KRML_CHECK_SIZE(sizeof (EverCrypt_AEAD_state_s), (uint32_t)1U); - { - EverCrypt_AEAD_state_s 
- *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s)); - p[0U] = lit; - *dst = p; - return EverCrypt_Error_Success; - } - } - #endif - return EverCrypt_Error_UnsupportedAlgorithm; -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_create_in(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s **dst, uint8_t *k) -{ - switch (a) - { - case Spec_Agile_AEAD_AES128_GCM: - { - return create_in_aes128_gcm(dst, k); - } - case Spec_Agile_AEAD_AES256_GCM: - { - return create_in_aes256_gcm(dst, k); - } - case Spec_Agile_AEAD_CHACHA20_POLY1305: - { - return create_in_chacha20_poly1305(dst, k); - } - default: - { - return EverCrypt_Error_UnsupportedAlgorithm; - } - } -} - -static EverCrypt_Error_error_code -encrypt_aes128_gcm( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - #if HACL_CAN_COMPILE_VALE - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; - uint8_t *scratch_b = ek + (uint32_t)304U; - uint8_t *ek1 = ek; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)176U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *plain_b_ = plain; - uint8_t *out_b_ = cipher; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - plain + plain_len_, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U; - if (len128x6 / (uint64_t)16U >= (uint64_t)18U) - { - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm128_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm128_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - 
hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - return EverCrypt_Error_Success; - } - } - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", __FILE__, __LINE__, "statically unreachable"); - KRML_HOST_EXIT(255U); - #endif -} - -static EverCrypt_Error_error_code -encrypt_aes256_gcm( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - #if HACL_CAN_COMPILE_VALE - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; - uint8_t *scratch_b = ek + (uint32_t)368U; - uint8_t *ek1 = ek; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)240U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *plain_b_ = plain; - uint8_t *out_b_ = cipher; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - plain + plain_len_, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U; - if (len128x6 / (uint64_t)16U >= (uint64_t)18U) - { - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm256_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm256_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - memcpy(cipher + 
(uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - return EverCrypt_Error_Success; - } - } - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", __FILE__, __LINE__, "statically unreachable"); - KRML_HOST_EXIT(255U); - #endif -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - { - EverCrypt_AEAD_state_s scrut = *s; - Spec_Cipher_Expansion_impl i = scrut.impl; - uint8_t *ek = scrut.ek; - switch (i) - { - case Spec_Cipher_Expansion_Vale_AES128: - { - return encrypt_aes128_gcm(s, iv, iv_len, ad, ad_len, plain, plain_len, cipher, tag); - } - case Spec_Cipher_Expansion_Vale_AES256: - { - return encrypt_aes256_gcm(s, iv, iv_len, ad, ad_len, plain, plain_len, cipher, tag); - } - case Spec_Cipher_Expansion_Hacl_CHACHA20: - { - if (iv_len != (uint32_t)12U) - { - return EverCrypt_Error_InvalidIVLength; - } - EverCrypt_Chacha20Poly1305_aead_encrypt(ek, - iv, - ad_len, - ad, - plain_len, - plain, - cipher, - tag); - return EverCrypt_Error_Success; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - } -} - -/** -WARNING: this function doesn't perform any dynamic - hardware check. You MUST make sure your hardware supports the - implementation of AESGCM. Besides, this function was not designed - for cross-compilation: if you compile it on a system which doesn't - support Vale, it will compile it to a function which makes the - program exit. -*/ -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - #if HACL_CAN_COMPILE_VALE - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut0 = aes128_key_expansion(k, keys_b0); - uint64_t scrut1 = aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES128; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; - if (s == NULL) - { - r = EverCrypt_Error_InvalidKey; - } - else if (iv_len == (uint32_t)0U) - { - r = EverCrypt_Error_InvalidIVLength; - } - else - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; - uint8_t *scratch_b = ek0 + (uint32_t)304U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)176U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *plain_b_ = plain; - uint8_t *out_b_ = cipher; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - plain + 
plain_len_, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U; - if (len128x6 / (uint64_t)16U >= (uint64_t)18U) - { - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; - } - } - } - return EverCrypt_Error_Success; - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "EverCrypt was compiled on a system which doesn\'t support Vale"); - KRML_HOST_EXIT(255U); - #endif -} - -/** -WARNING: this function doesn't perform any dynamic - hardware check. You MUST make sure your hardware supports the - implementation of AESGCM. Besides, this function was not designed - for cross-compilation: if you compile it on a system which doesn't - support Vale, it will compile it to a function which makes the - program exit. 
-*/ -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - #if HACL_CAN_COMPILE_VALE - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut0 = aes256_key_expansion(k, keys_b0); - uint64_t scrut1 = aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES256; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; - if (s == NULL) - { - r = EverCrypt_Error_InvalidKey; - } - else if (iv_len == (uint32_t)0U) - { - r = EverCrypt_Error_InvalidIVLength; - } - else - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; - uint8_t *scratch_b = ek0 + (uint32_t)368U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)240U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *plain_b_ = plain; - uint8_t *out_b_ = cipher; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - plain + plain_len_, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U; - if (len128x6 / (uint64_t)16U >= (uint64_t)18U) - { - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - 
(uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; - } - } - } - return EverCrypt_Error_Success; - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "EverCrypt was compiled on a system which doesn\'t support Vale"); - KRML_HOST_EXIT(255U); - #endif -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_aes128_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); - bool has_avx = EverCrypt_AutoConfig2_has_avx(); - bool has_sse = EverCrypt_AutoConfig2_has_sse(); - bool has_movbe = EverCrypt_AutoConfig2_has_movbe(); - bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); - #if HACL_CAN_COMPILE_VALE - if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) - { - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut0 = aes128_key_expansion(k, keys_b0); - uint64_t scrut1 = aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES128; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; - if (s == NULL) - { - r = EverCrypt_Error_InvalidKey; - } - else if (iv_len == (uint32_t)0U) - { - r = EverCrypt_Error_InvalidIVLength; - } - else - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; - uint8_t *scratch_b = ek0 + (uint32_t)304U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)176U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = - compute_iv_stdcall(iv_b, - (uint64_t)iv_len, - (uint64_t)len, - tmp_iv, - tmp_iv, - hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *plain_b_ = plain; - uint8_t *out_b_ = cipher; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - plain + plain_len_, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U; - if (len128x6 / (uint64_t)16U >= (uint64_t)18U) - { - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U 
* (uint64_t)16U; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; - } - } - } - return EverCrypt_Error_Success; - } - } - #endif - return EverCrypt_Error_UnsupportedAlgorithm; -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_aes256_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); - bool has_avx = EverCrypt_AutoConfig2_has_avx(); - bool has_sse = EverCrypt_AutoConfig2_has_sse(); - bool has_movbe = EverCrypt_AutoConfig2_has_movbe(); - bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); - #if HACL_CAN_COMPILE_VALE - if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) - { - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut0 = aes256_key_expansion(k, keys_b0); - uint64_t scrut1 = aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES256; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; - if (s == NULL) - { - r = EverCrypt_Error_InvalidKey; - } - else if (iv_len == (uint32_t)0U) - { - r = EverCrypt_Error_InvalidIVLength; - } - else - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; - uint8_t *scratch_b = ek0 + (uint32_t)368U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)240U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = - compute_iv_stdcall(iv_b, - (uint64_t)iv_len, - (uint64_t)len, - tmp_iv, - tmp_iv, - hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *plain_b_ = plain; - uint8_t *out_b_ = cipher; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - plain + plain_len_, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U; - if (len128x6 / (uint64_t)16U >= (uint64_t)18U) - { - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + 
(uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = plain_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = plain_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)plain_len, - scratch_b1, - tag); - } - memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; - } - } - } - return EverCrypt_Error_Success; - } - } - #endif - return EverCrypt_Error_UnsupportedAlgorithm; -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_chacha20_poly1305( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - uint8_t ek0[32U] = { 0U }; - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Hacl_CHACHA20; - lit.ek = ek0; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s; - EverCrypt_AEAD_state_s scrut; - uint8_t *ek; - memcpy(ek0, k, (uint32_t)32U * sizeof (uint8_t)); - s = &p; - scrut = *s; - ek = scrut.ek; - EverCrypt_Chacha20Poly1305_aead_encrypt(ek, iv, ad_len, ad, plain_len, plain, cipher, tag); - return EverCrypt_Error_Success; - } -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand( - Spec_Agile_AEAD_alg a, - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -) -{ - switch (a) - { - case Spec_Agile_AEAD_AES128_GCM: - { - return - EverCrypt_AEAD_encrypt_expand_aes128_gcm(k, - iv, - iv_len, - ad, - ad_len, - plain, - plain_len, - cipher, - tag); - } - case Spec_Agile_AEAD_AES256_GCM: - { - return - EverCrypt_AEAD_encrypt_expand_aes256_gcm(k, - iv, - iv_len, - ad, - ad_len, - plain, - plain_len, - cipher, - tag); - } - case Spec_Agile_AEAD_CHACHA20_POLY1305: - { - return - EverCrypt_AEAD_encrypt_expand_chacha20_poly1305(k, - iv, - iv_len, - ad, - ad_len, - plain, - plain_len, - cipher, - tag); - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -static EverCrypt_Error_error_code -decrypt_aes128_gcm( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - #if HACL_CAN_COMPILE_VALE - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; - uint8_t *scratch_b 
= ek + (uint32_t)304U; - uint8_t *ek1 = ek; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)176U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *cipher_b_ = cipher; - uint8_t *out_b_ = dst; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - cipher + cipher_len_, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U; - uint64_t c; - if (len128x6 / (uint64_t)16U >= (uint64_t)6U) - { - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm128_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut0; - c = c0; - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm128_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut0; - c = c0; - } - memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t r = c; - if (r == (uint64_t)0U) - { - return EverCrypt_Error_Success; - } - return EverCrypt_Error_AuthenticationFailure; - } - } - } - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", __FILE__, __LINE__, "statically unreachable"); - KRML_HOST_EXIT(255U); - #endif -} - -static EverCrypt_Error_error_code -decrypt_aes256_gcm( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - #if HACL_CAN_COMPILE_VALE - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; - 
uint8_t *scratch_b = ek + (uint32_t)368U; - uint8_t *ek1 = ek; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)240U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *cipher_b_ = cipher; - uint8_t *out_b_ = dst; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - cipher + cipher_len_, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U; - uint64_t c; - if (len128x6 / (uint64_t)16U >= (uint64_t)6U) - { - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm256_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut0; - c = c0; - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm256_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut0; - c = c0; - } - memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t r = c; - if (r == (uint64_t)0U) - { - return EverCrypt_Error_Success; - } - return EverCrypt_Error_AuthenticationFailure; - } - } - } - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", __FILE__, __LINE__, "statically unreachable"); - KRML_HOST_EXIT(255U); - #endif -} - -static EverCrypt_Error_error_code -decrypt_chacha20_poly1305( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - if (iv_len != (uint32_t)12U) - { - return EverCrypt_Error_InvalidIVLength; - } - { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; - 
uint32_t - r = EverCrypt_Chacha20Poly1305_aead_decrypt(ek, iv, ad_len, ad, cipher_len, dst, cipher, tag); - if (r == (uint32_t)0U) - { - return EverCrypt_Error_Success; - } - return EverCrypt_Error_AuthenticationFailure; - } -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - { - EverCrypt_AEAD_state_s scrut = *s; - Spec_Cipher_Expansion_impl i = scrut.impl; - switch (i) - { - case Spec_Cipher_Expansion_Vale_AES128: - { - return decrypt_aes128_gcm(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst); - } - case Spec_Cipher_Expansion_Vale_AES256: - { - return decrypt_aes256_gcm(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst); - } - case Spec_Cipher_Expansion_Hacl_CHACHA20: - { - return decrypt_chacha20_poly1305(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst); - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - } -} - -/** -WARNING: this function doesn't perform any dynamic - hardware check. You MUST make sure your hardware supports the - implementation of AESGCM. Besides, this function was not designed - for cross-compilation: if you compile it on a system which doesn't - support Vale, it will compile it to a function which makes the - program exit. -*/ -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - #if HACL_CAN_COMPILE_VALE - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b0); - uint64_t scrut0 = aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES128; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - { - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; - uint8_t *scratch_b = ek0 + (uint32_t)304U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)176U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *cipher_b_ = cipher; - uint8_t *out_b_ = dst; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - cipher + cipher_len_, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U; - uint64_t c; - if (len128x6 / 
(uint64_t)16U >= (uint64_t)6U) - { - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut2; - c = c0; - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut2; - c = c0; - } - memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t r = c; - if (r == (uint64_t)0U) - { - return EverCrypt_Error_Success; - } - return EverCrypt_Error_AuthenticationFailure; - } - } - } - } - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "EverCrypt was compiled on a system which doesn\'t support Vale"); - KRML_HOST_EXIT(255U); - #endif -} - -/** -WARNING: this function doesn't perform any dynamic - hardware check. You MUST make sure your hardware supports the - implementation of AESGCM. Besides, this function was not designed - for cross-compilation: if you compile it on a system which doesn't - support Vale, it will compile it to a function which makes the - program exit. 
-*/ -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - #if HACL_CAN_COMPILE_VALE - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b0); - uint64_t scrut0 = aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES256; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - { - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; - uint8_t *scratch_b = ek0 + (uint32_t)368U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)240U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *cipher_b_ = cipher; - uint8_t *out_b_ = dst; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - cipher + cipher_len_, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U; - uint64_t c; - if (len128x6 / (uint64_t)16U >= (uint64_t)6U) - { - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut2; - c = c0; - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut2; - c = c0; - } - memcpy(dst + (uint32_t)(uint64_t)cipher_len 
/ (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t r = c; - if (r == (uint64_t)0U) - { - return EverCrypt_Error_Success; - } - return EverCrypt_Error_AuthenticationFailure; - } - } - } - } - } - #else - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "EverCrypt was compiled on a system which doesn\'t support Vale"); - KRML_HOST_EXIT(255U); - #endif -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes128_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); - bool has_avx = EverCrypt_AutoConfig2_has_avx(); - bool has_sse = EverCrypt_AutoConfig2_has_sse(); - bool has_movbe = EverCrypt_AutoConfig2_has_movbe(); - bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); - #if HACL_CAN_COMPILE_VALE - if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) - { - uint8_t ek[480U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b0); - uint64_t scrut0 = aes128_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES128; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - else if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - else - { - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; - uint8_t *scratch_b = ek0 + (uint32_t)304U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)176U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = - compute_iv_stdcall(iv_b, - (uint64_t)iv_len, - (uint64_t)len, - tmp_iv, - tmp_iv, - hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *cipher_b_ = cipher; - uint8_t *out_b_ = dst; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - cipher + cipher_len_, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U; - uint64_t c; - if (len128x6 / (uint64_t)16U >= (uint64_t)6U) - { - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, 
- tag); - uint64_t c0 = scrut2; - c = c0; - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut2; - c = c0; - } - memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t r = c; - if (r == (uint64_t)0U) - { - return EverCrypt_Error_Success; - } - else - { - return EverCrypt_Error_AuthenticationFailure; - } - } - } - } - } - } - } - #endif - return EverCrypt_Error_UnsupportedAlgorithm; -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes256_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); - bool has_avx = EverCrypt_AutoConfig2_has_avx(); - bool has_sse = EverCrypt_AutoConfig2_has_sse(); - bool has_movbe = EverCrypt_AutoConfig2_has_movbe(); - bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); - #if HACL_CAN_COMPILE_VALE - if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe) - { - uint8_t ek[544U] = { 0U }; - uint8_t *keys_b0 = ek; - uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b0); - uint64_t scrut0 = aes256_keyhash_init(keys_b0, hkeys_b0); - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Vale_AES256; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s = &p; - if (s == NULL) - { - return EverCrypt_Error_InvalidKey; - } - else if (iv_len == (uint32_t)0U) - { - return EverCrypt_Error_InvalidIVLength; - } - else - { - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; - uint8_t *scratch_b = ek0 + (uint32_t)368U; - uint8_t *ek1 = ek0; - uint8_t *keys_b = ek1; - uint8_t *hkeys_b = ek1 + (uint32_t)240U; - uint8_t tmp_iv[16U] = { 0U }; - uint32_t len = iv_len / (uint32_t)16U; - uint32_t bytes_len = len * (uint32_t)16U; - uint8_t *iv_b = iv; - memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t - uu____0 = - compute_iv_stdcall(iv_b, - (uint64_t)iv_len, - (uint64_t)len, - tmp_iv, - tmp_iv, - hkeys_b); - uint8_t *inout_b = scratch_b; - uint8_t *abytes_b = scratch_b + (uint32_t)16U; - uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; - uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U; - uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U; - uint8_t *cipher_b_ = cipher; - uint8_t *out_b_ = dst; - uint8_t *auth_b_ = ad; - memcpy(inout_b, - cipher + cipher_len_, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - memcpy(abytes_b, - ad + auth_len_, - (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U; - uint64_t c; - if (len128x6 / 
(uint64_t)16U >= (uint64_t)6U) - { - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6; - uint8_t *out128_b = out_b_ + (uint32_t)len128x6; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128x6_ = len128x6 / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut2; - c = c0; - } - else - { - uint32_t len128x61 = (uint32_t)0U; - uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U; - uint8_t *in128x6_b = cipher_b_; - uint8_t *out128x6_b = out_b_; - uint8_t *in128_b = cipher_b_ + len128x61; - uint8_t *out128_b = out_b_ + len128x61; - uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; - uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_decrypt_opt(auth_b_, - (uint64_t)ad_len, - auth_num, - keys_b, - tmp_iv, - hkeys_b, - abytes_b, - in128x6_b, - out128x6_b, - len128x6_, - in128_b, - out128_b, - len128_num_, - inout_b, - (uint64_t)cipher_len, - scratch_b1, - tag); - uint64_t c0 = scrut2; - c = c0; - } - memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, - inout_b, - (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t)); - { - uint64_t r = c; - if (r == (uint64_t)0U) - { - return EverCrypt_Error_Success; - } - else - { - return EverCrypt_Error_AuthenticationFailure; - } - } - } - } - } - } - } - #endif - return EverCrypt_Error_UnsupportedAlgorithm; -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_chacha20_poly1305( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - uint8_t ek[32U] = { 0U }; - EverCrypt_AEAD_state_s lit; - lit.impl = Spec_Cipher_Expansion_Hacl_CHACHA20; - lit.ek = ek; - { - EverCrypt_AEAD_state_s p = lit; - EverCrypt_AEAD_state_s *s; - EverCrypt_Error_error_code r; - memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t)); - s = &p; - r = decrypt_chacha20_poly1305(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst); - return r; - } -} - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand( - Spec_Agile_AEAD_alg a, - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -) -{ - switch (a) - { - case Spec_Agile_AEAD_AES128_GCM: - { - return - EverCrypt_AEAD_decrypt_expand_aes128_gcm(k, - iv, - iv_len, - ad, - ad_len, - cipher, - cipher_len, - tag, - dst); - } - case Spec_Agile_AEAD_AES256_GCM: - { - return - EverCrypt_AEAD_decrypt_expand_aes256_gcm(k, - iv, - iv_len, - ad, - ad_len, - cipher, - cipher_len, - tag, - dst); - } - case Spec_Agile_AEAD_CHACHA20_POLY1305: - { - return - EverCrypt_AEAD_decrypt_expand_chacha20_poly1305(k, - iv, - iv_len, - ad, - ad_len, - cipher, - cipher_len, - tag, - dst); - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -void EverCrypt_AEAD_free(EverCrypt_AEAD_state_s *s) -{ - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; - 
KRML_HOST_FREE(ek);
-  KRML_HOST_FREE(s);
-}
-
diff --git a/dist/c89-compatible/EverCrypt_AEAD.h b/dist/c89-compatible/EverCrypt_AEAD.h
deleted file mode 100644
index bca062bce7..0000000000
--- a/dist/c89-compatible/EverCrypt_AEAD.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __EverCrypt_AEAD_H
-#define __EverCrypt_AEAD_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Spec.h"
-#include "EverCrypt_Error.h"
-#include "EverCrypt_Chacha20Poly1305.h"
-#include "EverCrypt_AutoConfig2.h"
-#include "evercrypt_targetconfig.h"
-typedef struct EverCrypt_AEAD_state_s_s EverCrypt_AEAD_state_s;
-
-bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee);
-
-Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s);
-
-EverCrypt_Error_error_code
-EverCrypt_AEAD_create_in(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s **dst, uint8_t *k);
-
-EverCrypt_Error_error_code
-EverCrypt_AEAD_encrypt(
-  EverCrypt_AEAD_state_s *s,
-  uint8_t *iv,
-  uint32_t iv_len,
-  uint8_t *ad,
-  uint32_t ad_len,
-  uint8_t *plain,
-  uint32_t plain_len,
-  uint8_t *cipher,
-  uint8_t *tag
-);
-
-/**
-WARNING: this function doesn't perform any dynamic
-  hardware check. You MUST make sure your hardware supports the
-  implementation of AESGCM. Besides, this function was not designed
-  for cross-compilation: if you compile it on a system which doesn't
-  support Vale, it will compile it to a function which makes the
-  program exit.
-*/
-EverCrypt_Error_error_code
-EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check(
-  uint8_t *k,
-  uint8_t *iv,
-  uint32_t iv_len,
-  uint8_t *ad,
-  uint32_t ad_len,
-  uint8_t *plain,
-  uint32_t plain_len,
-  uint8_t *cipher,
-  uint8_t *tag
-);
-
-/**
-WARNING: this function doesn't perform any dynamic
-  hardware check. You MUST make sure your hardware supports the
-  implementation of AESGCM. Besides, this function was not designed
-  for cross-compilation: if you compile it on a system which doesn't
-  support Vale, it will compile it to a function which makes the
-  program exit.
-*/ -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_aes128_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_aes256_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand_chacha20_poly1305( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_encrypt_expand( - Spec_Agile_AEAD_alg a, - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *plain, - uint32_t plain_len, - uint8_t *cipher, - uint8_t *tag -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt( - EverCrypt_AEAD_state_s *s, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -); - -/** -WARNING: this function doesn't perform any dynamic - hardware check. You MUST make sure your hardware supports the - implementation of AESGCM. Besides, this function was not designed - for cross-compilation: if you compile it on a system which doesn't - support Vale, it will compile it to a function which makes the - program exit. -*/ -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -); - -/** -WARNING: this function doesn't perform any dynamic - hardware check. You MUST make sure your hardware supports the - implementation of AESGCM. Besides, this function was not designed - for cross-compilation: if you compile it on a system which doesn't - support Vale, it will compile it to a function which makes the - program exit. 
-*/ -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes128_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_aes256_gcm( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand_chacha20_poly1305( - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -); - -EverCrypt_Error_error_code -EverCrypt_AEAD_decrypt_expand( - Spec_Agile_AEAD_alg a, - uint8_t *k, - uint8_t *iv, - uint32_t iv_len, - uint8_t *ad, - uint32_t ad_len, - uint8_t *cipher, - uint32_t cipher_len, - uint8_t *tag, - uint8_t *dst -); - -void EverCrypt_AEAD_free(EverCrypt_AEAD_state_s *s); - -#if defined(__cplusplus) -} -#endif - -#define __EverCrypt_AEAD_H_DEFINED -#endif diff --git a/dist/c89-compatible/EverCrypt_AutoConfig2.c b/dist/c89-compatible/EverCrypt_AutoConfig2.c deleted file mode 100644 index a9ece51c83..0000000000 --- a/dist/c89-compatible/EverCrypt_AutoConfig2.c +++ /dev/null @@ -1,272 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "EverCrypt_AutoConfig2.h" - -#include "internal/Vale.h" - -static bool cpu_has_shaext[1U] = { false }; - -static bool cpu_has_aesni[1U] = { false }; - -static bool cpu_has_pclmulqdq[1U] = { false }; - -static bool cpu_has_avx2[1U] = { false }; - -static bool cpu_has_avx[1U] = { false }; - -static bool cpu_has_bmi2[1U] = { false }; - -static bool cpu_has_adx[1U] = { false }; - -static bool cpu_has_sse[1U] = { false }; - -static bool cpu_has_movbe[1U] = { false }; - -static bool cpu_has_rdrand[1U] = { false }; - -static bool cpu_has_avx512[1U] = { false }; - -bool EverCrypt_AutoConfig2_has_shaext() -{ - return cpu_has_shaext[0U]; -} - -bool EverCrypt_AutoConfig2_has_aesni() -{ - return cpu_has_aesni[0U]; -} - -bool EverCrypt_AutoConfig2_has_pclmulqdq() -{ - return cpu_has_pclmulqdq[0U]; -} - -bool EverCrypt_AutoConfig2_has_avx2() -{ - return cpu_has_avx2[0U]; -} - -bool EverCrypt_AutoConfig2_has_avx() -{ - return cpu_has_avx[0U]; -} - -bool EverCrypt_AutoConfig2_has_bmi2() -{ - return cpu_has_bmi2[0U]; -} - -bool EverCrypt_AutoConfig2_has_adx() -{ - return cpu_has_adx[0U]; -} - -bool EverCrypt_AutoConfig2_has_sse() -{ - return cpu_has_sse[0U]; -} - -bool EverCrypt_AutoConfig2_has_movbe() -{ - return cpu_has_movbe[0U]; -} - -bool EverCrypt_AutoConfig2_has_rdrand() -{ - return cpu_has_rdrand[0U]; -} - -bool EverCrypt_AutoConfig2_has_avx512() -{ - return cpu_has_avx512[0U]; -} - -void EverCrypt_AutoConfig2_recall() -{ - -} - -void EverCrypt_AutoConfig2_init() -{ - #if HACL_CAN_COMPILE_VALE - uint64_t scrut0 = check_aesni(); - uint64_t scrut1; - uint64_t scrut2; - uint64_t scrut3; - uint64_t scrut4; - uint64_t scrut5; - uint64_t scrut6; - uint64_t scrut7; - uint64_t scrut; - if (scrut0 != (uint64_t)0U) - { - cpu_has_aesni[0U] = true; - cpu_has_pclmulqdq[0U] = true; - } - scrut1 = check_sha(); - if (scrut1 != (uint64_t)0U) - { - cpu_has_shaext[0U] = true; - } - scrut2 = check_adx_bmi2(); - if (scrut2 != (uint64_t)0U) - { - cpu_has_bmi2[0U] = true; - cpu_has_adx[0U] = true; - } - scrut3 = check_avx(); - if (scrut3 != (uint64_t)0U) - { - uint64_t scrut8 = check_osxsave(); - if (scrut8 != (uint64_t)0U) - { - uint64_t scrut9 = check_avx_xcr0(); - if (scrut9 != (uint64_t)0U) - { - cpu_has_avx[0U] = true; - } - } - } - scrut4 = check_avx2(); - if (scrut4 != (uint64_t)0U) - { - uint64_t scrut8 = check_osxsave(); - if (scrut8 != (uint64_t)0U) - { - uint64_t scrut9 = check_avx_xcr0(); - if (scrut9 != (uint64_t)0U) - { - cpu_has_avx2[0U] = true; - } - } - } - scrut5 = check_sse(); - if (scrut5 != (uint64_t)0U) - { - cpu_has_sse[0U] = true; - } - scrut6 = check_movbe(); - if (scrut6 != (uint64_t)0U) - { - cpu_has_movbe[0U] = true; - } - scrut7 = check_rdrand(); - if (scrut7 != (uint64_t)0U) - { - cpu_has_rdrand[0U] = true; - } - scrut = check_avx512(); - if (scrut != (uint64_t)0U) - { - uint64_t scrut8 = check_osxsave(); - if (scrut8 != (uint64_t)0U) - { - uint64_t scrut9 = check_avx_xcr0(); - if (scrut9 != (uint64_t)0U) - { - uint64_t scrut10 = check_avx512_xcr0(); - if (scrut10 != (uint64_t)0U) - { - cpu_has_avx512[0U] = true; - return; - } - return; - } - return; - } - return; - } - #endif -} - -void EverCrypt_AutoConfig2_disable_avx2() -{ - cpu_has_avx2[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_avx() -{ - cpu_has_avx[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_bmi2() -{ - cpu_has_bmi2[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_adx() -{ - cpu_has_adx[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_shaext() -{ - cpu_has_shaext[0U] = false; -} - 
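/* Note on the API above: the cpu_has_* caches start out false and are only
 * ever set by EverCrypt_AutoConfig2_init(); the disable_* entry points clear
 * individual flags (useful for forcing portable fallbacks in tests), and
 * nothing re-enables a flag short of running init() again. A minimal usage
 * sketch -- the demo `main` is hypothetical, only the declarations from
 * EverCrypt_AutoConfig2.h are assumed:
 *
 *   #include <stdio.h>
 *   #include "EverCrypt_AutoConfig2.h"
 *
 *   int main(void)
 *   {
 *     EverCrypt_AutoConfig2_init();            // run the CPU feature probes once
 *     printf("aesni: %d\n", EverCrypt_AutoConfig2_has_aesni());
 *     printf("avx2:  %d\n", EverCrypt_AutoConfig2_has_avx2());
 *     EverCrypt_AutoConfig2_disable_avx2();    // force the non-AVX2 code paths
 *     printf("avx2:  %d\n", EverCrypt_AutoConfig2_has_avx2());
 *     return 0;
 *   }
 *
 * Until init() runs, the x86 feature predicates above all report false,
 * because the cpu_has_* arrays are statically initialized to { false }. */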
-void EverCrypt_AutoConfig2_disable_aesni() -{ - cpu_has_aesni[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_pclmulqdq() -{ - cpu_has_pclmulqdq[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_sse() -{ - cpu_has_sse[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_movbe() -{ - cpu_has_movbe[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_rdrand() -{ - cpu_has_rdrand[0U] = false; -} - -void EverCrypt_AutoConfig2_disable_avx512() -{ - cpu_has_avx512[0U] = false; -} - -bool EverCrypt_AutoConfig2_has_vec128() -{ - bool avx = EverCrypt_AutoConfig2_has_avx(); - bool other = has_vec128_not_avx(); - return avx || other; -} - -bool EverCrypt_AutoConfig2_has_vec256() -{ - bool avx2 = EverCrypt_AutoConfig2_has_avx2(); - bool other = has_vec256_not_avx2(); - return avx2 || other; -} - diff --git a/dist/c89-compatible/EverCrypt_AutoConfig2.h b/dist/c89-compatible/EverCrypt_AutoConfig2.h deleted file mode 100644 index 1e1f8653d1..0000000000 --- a/dist/c89-compatible/EverCrypt_AutoConfig2.h +++ /dev/null @@ -1,99 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-
-#ifndef __EverCrypt_AutoConfig2_H
-#define __EverCrypt_AutoConfig2_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-
-#include "evercrypt_targetconfig.h"
-bool EverCrypt_AutoConfig2_has_shaext();
-
-bool EverCrypt_AutoConfig2_has_aesni();
-
-bool EverCrypt_AutoConfig2_has_pclmulqdq();
-
-bool EverCrypt_AutoConfig2_has_avx2();
-
-bool EverCrypt_AutoConfig2_has_avx();
-
-bool EverCrypt_AutoConfig2_has_bmi2();
-
-bool EverCrypt_AutoConfig2_has_adx();
-
-bool EverCrypt_AutoConfig2_has_sse();
-
-bool EverCrypt_AutoConfig2_has_movbe();
-
-bool EverCrypt_AutoConfig2_has_rdrand();
-
-bool EverCrypt_AutoConfig2_has_avx512();
-
-void EverCrypt_AutoConfig2_recall();
-
-void EverCrypt_AutoConfig2_init();
-
-typedef void (*EverCrypt_AutoConfig2_disabler)();
-
-void EverCrypt_AutoConfig2_disable_avx2();
-
-void EverCrypt_AutoConfig2_disable_avx();
-
-void EverCrypt_AutoConfig2_disable_bmi2();
-
-void EverCrypt_AutoConfig2_disable_adx();
-
-void EverCrypt_AutoConfig2_disable_shaext();
-
-void EverCrypt_AutoConfig2_disable_aesni();
-
-void EverCrypt_AutoConfig2_disable_pclmulqdq();
-
-void EverCrypt_AutoConfig2_disable_sse();
-
-void EverCrypt_AutoConfig2_disable_movbe();
-
-void EverCrypt_AutoConfig2_disable_rdrand();
-
-void EverCrypt_AutoConfig2_disable_avx512();
-
-bool EverCrypt_AutoConfig2_has_vec128();
-
-bool EverCrypt_AutoConfig2_has_vec256();
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __EverCrypt_AutoConfig2_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/EverCrypt_Chacha20Poly1305.c b/dist/c89-compatible/EverCrypt_Chacha20Poly1305.c
deleted file mode 100644
index a411698645..0000000000
--- a/dist/c89-compatible/EverCrypt_Chacha20Poly1305.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "EverCrypt_Chacha20Poly1305.h" - - - -void -EverCrypt_Chacha20Poly1305_aead_encrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *tag -) -{ - bool avx2 = EverCrypt_AutoConfig2_has_avx2(); - bool avx = EverCrypt_AutoConfig2_has_avx(); - bool vec256 = EverCrypt_AutoConfig2_has_vec256(); - bool vec128 = EverCrypt_AutoConfig2_has_vec128(); - #if HACL_CAN_COMPILE_VEC256 - if (vec256) - { - Hacl_Chacha20Poly1305_256_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag); - return; - } - #endif - #if HACL_CAN_COMPILE_VEC128 - if (vec128) - { - Hacl_Chacha20Poly1305_128_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag); - return; - } - #endif - Hacl_Chacha20Poly1305_32_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag); -} - -uint32_t -EverCrypt_Chacha20Poly1305_aead_decrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *tag -) -{ - bool avx2 = EverCrypt_AutoConfig2_has_avx2(); - bool avx = EverCrypt_AutoConfig2_has_avx(); - bool vec256 = EverCrypt_AutoConfig2_has_vec256(); - bool vec128 = EverCrypt_AutoConfig2_has_vec128(); - #if HACL_CAN_COMPILE_VEC256 - if (vec256) - { - return Hacl_Chacha20Poly1305_256_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag); - } - #endif - #if HACL_CAN_COMPILE_VEC128 - if (vec128) - { - return Hacl_Chacha20Poly1305_128_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag); - } - #endif - return Hacl_Chacha20Poly1305_32_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag); -} - diff --git a/dist/c89-compatible/EverCrypt_Chacha20Poly1305.h b/dist/c89-compatible/EverCrypt_Chacha20Poly1305.h deleted file mode 100644 index b0b1c2fa80..0000000000 --- a/dist/c89-compatible/EverCrypt_Chacha20Poly1305.h +++ /dev/null @@ -1,72 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-
-#ifndef __EverCrypt_Chacha20Poly1305_H
-#define __EverCrypt_Chacha20Poly1305_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Chacha20Poly1305_32.h"
-#include "Hacl_Chacha20Poly1305_256.h"
-#include "Hacl_Chacha20Poly1305_128.h"
-#include "EverCrypt_AutoConfig2.h"
-#include "evercrypt_targetconfig.h"
-void
-EverCrypt_Chacha20Poly1305_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *tag
-);
-
-uint32_t
-EverCrypt_Chacha20Poly1305_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *tag
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __EverCrypt_Chacha20Poly1305_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/EverCrypt_Cipher.c b/dist/c89-compatible/EverCrypt_Cipher.c
deleted file mode 100644
index a8324c004a..0000000000
--- a/dist/c89-compatible/EverCrypt_Cipher.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "EverCrypt_Cipher.h"
-
-#include "internal/Hacl_Chacha20.h"
-
-void
-EverCrypt_Cipher_chacha20(
-  uint32_t len,
-  uint8_t *dst,
-  uint8_t *src,
-  uint8_t *key,
-  uint8_t *iv,
-  uint32_t ctr
-)
-{
-  uint32_t ctx[16U] = { 0U };
-  Hacl_Impl_Chacha20_chacha20_init(ctx, key, iv, ctr);
-  Hacl_Impl_Chacha20_chacha20_update(ctx, len, dst, src);
-}
-
diff --git a/dist/c89-compatible/EverCrypt_Cipher.h b/dist/c89-compatible/EverCrypt_Cipher.h
deleted file mode 100644
index 6689f79d6a..0000000000
--- a/dist/c89-compatible/EverCrypt_Cipher.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __EverCrypt_Cipher_H
-#define __EverCrypt_Cipher_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-
-#include "evercrypt_targetconfig.h"
-void
-EverCrypt_Cipher_chacha20(
-  uint32_t len,
-  uint8_t *dst,
-  uint8_t *src,
-  uint8_t *key,
-  uint8_t *iv,
-  uint32_t ctr
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __EverCrypt_Cipher_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/EverCrypt_Curve25519.c b/dist/c89-compatible/EverCrypt_Curve25519.c
deleted file mode 100644
index 71db562b0e..0000000000
--- a/dist/c89-compatible/EverCrypt_Curve25519.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "EverCrypt_Curve25519.h" - - - -static inline bool has_adx_bmi2() -{ - bool has_bmi2 = EverCrypt_AutoConfig2_has_bmi2(); - bool has_adx = EverCrypt_AutoConfig2_has_adx(); - return has_bmi2 && has_adx; -} - -void EverCrypt_Curve25519_secret_to_public(uint8_t *pub, uint8_t *priv) -{ - #if HACL_CAN_COMPILE_VALE - if (has_adx_bmi2()) - { - Hacl_Curve25519_64_secret_to_public(pub, priv); - return; - } - #endif - Hacl_Curve25519_51_secret_to_public(pub, priv); -} - -void EverCrypt_Curve25519_scalarmult(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub) -{ - #if HACL_CAN_COMPILE_VALE - if (has_adx_bmi2()) - { - Hacl_Curve25519_64_scalarmult(shared, my_priv, their_pub); - return; - } - #endif - Hacl_Curve25519_51_scalarmult(shared, my_priv, their_pub); -} - -bool EverCrypt_Curve25519_ecdh(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub) -{ - #if HACL_CAN_COMPILE_VALE - if (has_adx_bmi2()) - { - return Hacl_Curve25519_64_ecdh(shared, my_priv, their_pub); - } - #endif - return Hacl_Curve25519_51_ecdh(shared, my_priv, their_pub); -} - diff --git a/dist/c89-compatible/EverCrypt_Curve25519.h b/dist/c89-compatible/EverCrypt_Curve25519.h deleted file mode 100644 index 6613484c39..0000000000 --- a/dist/c89-compatible/EverCrypt_Curve25519.h +++ /dev/null @@ -1,53 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-
-#ifndef __EverCrypt_Curve25519_H
-#define __EverCrypt_Curve25519_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Curve25519_64.h"
-#include "Hacl_Curve25519_51.h"
-#include "EverCrypt_AutoConfig2.h"
-#include "evercrypt_targetconfig.h"
-void EverCrypt_Curve25519_secret_to_public(uint8_t *pub, uint8_t *priv);
-
-void EverCrypt_Curve25519_scalarmult(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub);
-
-bool EverCrypt_Curve25519_ecdh(uint8_t *shared, uint8_t *my_priv, uint8_t *their_pub);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __EverCrypt_Curve25519_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/EverCrypt_DRBG.c b/dist/c89-compatible/EverCrypt_DRBG.c
deleted file mode 100644
index 9b39bf50ce..0000000000
--- a/dist/c89-compatible/EverCrypt_DRBG.c
+++ /dev/null
@@ -1,2604 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "EverCrypt_DRBG.h" - - - -uint32_t EverCrypt_DRBG_reseed_interval = (uint32_t)1024U; - -uint32_t EverCrypt_DRBG_max_output_length = (uint32_t)65536U; - -uint32_t EverCrypt_DRBG_max_length = (uint32_t)65536U; - -uint32_t EverCrypt_DRBG_max_personalization_string_length = (uint32_t)65536U; - -uint32_t EverCrypt_DRBG_max_additional_input_length = (uint32_t)65536U; - -uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)16U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)32U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -#define SHA1_s 0 -#define SHA2_256_s 1 -#define SHA2_384_s 2 -#define SHA2_512_s 3 - -typedef uint8_t state_s_tags; - -typedef struct EverCrypt_DRBG_state_s_s -{ - state_s_tags tag; - union { - Hacl_HMAC_DRBG_state case_SHA1_s; - Hacl_HMAC_DRBG_state case_SHA2_256_s; - Hacl_HMAC_DRBG_state case_SHA2_384_s; - Hacl_HMAC_DRBG_state case_SHA2_512_s; - } - val; -} -EverCrypt_DRBG_state_s; - -bool -EverCrypt_DRBG_uu___is_SHA1_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -) -{ - if (projectee.tag == SHA1_s) - { - return true; - } - return false; -} - -bool -EverCrypt_DRBG_uu___is_SHA2_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -) -{ - if (projectee.tag == SHA2_256_s) - { - return true; - } - return false; -} - -bool -EverCrypt_DRBG_uu___is_SHA2_384_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -) -{ - if (projectee.tag == SHA2_384_s) - { - return true; - } - return false; -} - -bool -EverCrypt_DRBG_uu___is_SHA2_512_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -) -{ - if (projectee.tag == SHA2_512_s) - { - return true; - } - return false; -} - -EverCrypt_DRBG_state_s *EverCrypt_DRBG_create(Spec_Hash_Definitions_hash_alg a) -{ - EverCrypt_DRBG_state_s st; - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - EverCrypt_DRBG_state_s lit0; - lit0.tag = SHA1_s; - { - uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t)); - uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t)); - uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t)); - ctr[0U] = (uint32_t)1U; - { - Hacl_HMAC_DRBG_state lit; - lit.k = k; - lit.v = v; - lit.reseed_counter = ctr; - lit0.val.case_SHA1_s = lit; - st = lit0; - } - } - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - EverCrypt_DRBG_state_s lit0; - lit0.tag = SHA2_256_s; - { - uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t)); - ctr[0U] = (uint32_t)1U; - { - Hacl_HMAC_DRBG_state lit; - lit.k = k; - lit.v = v; - lit.reseed_counter = ctr; - lit0.val.case_SHA2_256_s = lit; - st = lit0; - } - } - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - EverCrypt_DRBG_state_s lit0; - lit0.tag = SHA2_384_s; - { - uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t)); - uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t)); - uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t)); - ctr[0U] = (uint32_t)1U; - 
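      /* Fresh HMAC-DRBG state (SP 800-90A): reseed_counter starts at 1, and
         k/v -- one SHA2-384 digest, i.e. 48 bytes each, in this branch --
         stay all-zero until an instantiate_* call below keys them. */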
{ - Hacl_HMAC_DRBG_state lit; - lit.k = k; - lit.v = v; - lit.reseed_counter = ctr; - lit0.val.case_SHA2_384_s = lit; - st = lit0; - } - } - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - EverCrypt_DRBG_state_s lit0; - lit0.tag = SHA2_512_s; - { - uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t)); - ctr[0U] = (uint32_t)1U; - { - Hacl_HMAC_DRBG_state lit; - lit.k = k; - lit.v = v; - lit.reseed_counter = ctr; - lit0.val.case_SHA2_512_s = lit; - st = lit0; - } - } - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - KRML_CHECK_SIZE(sizeof (EverCrypt_DRBG_state_s), (uint32_t)1U); - { - EverCrypt_DRBG_state_s - *buf = (EverCrypt_DRBG_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_DRBG_state_s)); - buf[0U] = st; - return buf; - } -} - -bool -EverCrypt_DRBG_instantiate_sha1( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -) -{ - if (personalization_string_len > Hacl_HMAC_DRBG_max_personalization_string_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1); - uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / (uint32_t)2U; - uint32_t min_entropy = entropy_input_len + nonce_len; - KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy); - { - uint8_t entropy[min_entropy]; - memset(entropy, 0U, min_entropy * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy, min_entropy); - if (!ok) - { - return false; - } - { - uint8_t *entropy_input = entropy; - uint8_t *nonce = entropy + entropy_input_len; - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA1_s) - { - scrut = st_s.val.case_SHA1_s; - } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - memset(k, 0U, (uint32_t)20U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = - (uint32_t)21U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, 
input_len); - EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)21U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if - ( - entropy_input_len - + nonce_len - + personalization_string_len - != (uint32_t)0U - ) - { - memcpy(input + (uint32_t)21U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - return true; - } - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_instantiate_sha2_256( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -) -{ - if (personalization_string_len > Hacl_HMAC_DRBG_max_personalization_string_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256); - uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / (uint32_t)2U; - uint32_t min_entropy = entropy_input_len + nonce_len; - KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy); - { - uint8_t entropy[min_entropy]; - memset(entropy, 0U, min_entropy * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy, min_entropy); - if (!ok) - { - return false; - } - { - uint8_t *entropy_input = entropy; - uint8_t *nonce = entropy + entropy_input_len; - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA2_256_s) - { - scrut = st_s.val.case_SHA2_256_s; - } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - memset(k, 0U, (uint32_t)32U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = - (uint32_t)33U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof 
(uint8_t)); - } - input0[32U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)33U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if - ( - entropy_input_len - + nonce_len - + personalization_string_len - != (uint32_t)0U - ) - { - memcpy(input + (uint32_t)33U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_256(k_0, - k, - (uint32_t)32U, - input, - input_len0); - EverCrypt_HMAC_compute_sha2_256(v, - k_0, - (uint32_t)32U, - v, - (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - return true; - } - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_instantiate_sha2_384( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -) -{ - if (personalization_string_len > Hacl_HMAC_DRBG_max_personalization_string_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384); - uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / (uint32_t)2U; - uint32_t min_entropy = entropy_input_len + nonce_len; - KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy); - { - uint8_t entropy[min_entropy]; - memset(entropy, 0U, min_entropy * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy, min_entropy); - if (!ok) - { - return false; - } - { - uint8_t *entropy_input = entropy; - uint8_t *nonce = entropy + entropy_input_len; - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA2_384_s) - { - scrut = st_s.val.case_SHA2_384_s; - } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - memset(k, 0U, (uint32_t)48U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = - (uint32_t)49U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) 
- { - memcpy(input0 + (uint32_t)49U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)49U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if - ( - entropy_input_len - + nonce_len - + personalization_string_len - != (uint32_t)0U - ) - { - memcpy(input + (uint32_t)49U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_384(k_0, - k, - (uint32_t)48U, - input, - input_len0); - EverCrypt_HMAC_compute_sha2_384(v, - k_0, - (uint32_t)48U, - v, - (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - return true; - } - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_instantiate_sha2_512( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -) -{ - if (personalization_string_len > Hacl_HMAC_DRBG_max_personalization_string_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512); - uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / (uint32_t)2U; - uint32_t min_entropy = entropy_input_len + nonce_len; - KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy); - { - uint8_t entropy[min_entropy]; - memset(entropy, 0U, min_entropy * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy, min_entropy); - if (!ok) - { - return false; - } - { - uint8_t *entropy_input = entropy; - uint8_t *nonce = entropy + entropy_input_len; - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA2_512_s) - { - scrut = st_s.val.case_SHA2_512_s; - } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - memset(k, 0U, (uint32_t)64U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = - (uint32_t)65U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - 
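                    /* HMAC-DRBG update: input0 is V || 0x00 || seed_material;
                       the two HMAC-SHA2-512 calls below derive the new K from
                       it and then refresh V under that K. When seed_material
                       is non-empty the round repeats with a 0x01 separator. */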
memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)65U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if - ( - entropy_input_len - + nonce_len - + personalization_string_len - != (uint32_t)0U - ) - { - memcpy(input + (uint32_t)65U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_512(k_0, - k, - (uint32_t)64U, - input, - input_len0); - EverCrypt_HMAC_compute_sha2_512(v, - k_0, - (uint32_t)64U, - v, - (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - return true; - } - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_reseed_sha1( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if (additional_input_len > Hacl_HMAC_DRBG_max_additional_input_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len); - { - uint8_t entropy_input[entropy_input_len]; - memset(entropy_input, 0U, entropy_input_len * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len); - if (!ok) - { - return false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA1_s) - { - uu____0 = st_s.val.case_SHA1_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - seed_material, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, 
(uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - seed_material, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - return true; - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_reseed_sha2_256( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if (additional_input_len > Hacl_HMAC_DRBG_max_additional_input_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len); - { - uint8_t entropy_input[entropy_input_len]; - memset(entropy_input, 0U, entropy_input_len * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len); - if (!ok) - { - return false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA2_256_s) - { - uu____0 = st_s.val.case_SHA2_256_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - seed_material, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - seed_material, - (entropy_input_len + 
additional_input_len) * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0); - EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - return true; - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_reseed_sha2_384( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if (additional_input_len > Hacl_HMAC_DRBG_max_additional_input_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len); - { - uint8_t entropy_input[entropy_input_len]; - memset(entropy_input, 0U, entropy_input_len * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len); - if (!ok) - { - return false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA2_384_s) - { - uu____0 = st_s.val.case_SHA2_384_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - seed_material, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - seed_material, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0); - EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - return true; - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_reseed_sha2_512( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if (additional_input_len > 
Hacl_HMAC_DRBG_max_additional_input_length) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len); - { - uint8_t entropy_input[entropy_input_len]; - memset(entropy_input, 0U, entropy_input_len * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len); - if (!ok) - { - return false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA2_512_s) - { - uu____0 = st_s.val.case_SHA2_512_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - seed_material, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - seed_material, - (entropy_input_len + additional_input_len) * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0); - EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - return true; - } - } - } - } - } - } - } - } - } -} - -bool -EverCrypt_DRBG_generate_sha1( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if - ( - additional_input_len - > Hacl_HMAC_DRBG_max_additional_input_length - || n > Hacl_HMAC_DRBG_max_output_length - ) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1); - bool ok0; - if (additional_input_len > Hacl_HMAC_DRBG_max_additional_input_length) - { - ok0 = false; - } - else - { - uint32_t entropy_input_len1 = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1); - { - uint8_t 
entropy_input[entropy_input_len1]; - memset(entropy_input, 0U, entropy_input_len1 * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len1); - bool result; - if (!ok) - { - result = false; - } - else - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1 + additional_input_len); - { - uint8_t seed_material[entropy_input_len1 + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len1 * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len1, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA1_s) - { - uu____0 = st_s.val.case_SHA1_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)21U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)21U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - result = true; - } - } - } - } - } - } - ok0 = result; - } - } - } - if (!ok0) - { - return false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state x1; - if (st_s.tag == SHA1_s) - { - x1 = st_s.val.case_SHA1_s; - } - else - { - x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - bool b; - if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - b = false; - } - else - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA1_s) - { - scrut = st_s.val.case_SHA1_s; - } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, 
input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = n / (uint32_t)20U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i < max; i++) - { - EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U); - memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)20U < n) - { - uint8_t *block = output1 + max * (uint32_t)20U; - EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U); - memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - b = true; - } - } - } - } - } - } - } - return true; - } - } - } -} - -bool -EverCrypt_DRBG_generate_sha2_256( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if - ( - additional_input_len - > Hacl_HMAC_DRBG_max_additional_input_length - || n > Hacl_HMAC_DRBG_max_output_length - ) - { - return false; - } 
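/*
 * [Editor's illustration -- not part of the deleted file.] The generate_sha1
 * body above (and each SHA-2 variant below) produces output in hash-sized
 * blocks by iterating V = HMAC(K, V). A minimal sketch of that loop,
 * specialized to SHA2-256 (32-byte HMAC output), assuming <string.h> and the
 * EverCrypt_HMAC_compute_sha2_256 prototype used throughout this file; the
 * helper name is hypothetical.
 */
static void
drbg_generate_blocks_sha2_256(uint8_t *out, uint32_t n, uint8_t *k, uint8_t *v)
{
  uint32_t max = n / (uint32_t)32U;
  uint32_t i;
  for (i = (uint32_t)0U; i < max; i++)
  {
    /* V = HMAC(K, V); each iteration yields one full 32-byte block */
    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
    memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
  }
  if (max * (uint32_t)32U < n)
  {
    /* trailing partial block: iterate once more, copy only n mod 32 bytes */
    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
    memcpy(out + max * (uint32_t)32U, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
  }
}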
- { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256); - bool ok0; - if (additional_input_len > Hacl_HMAC_DRBG_max_additional_input_length) - { - ok0 = false; - } - else - { - uint32_t entropy_input_len1 = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1); - { - uint8_t entropy_input[entropy_input_len1]; - memset(entropy_input, 0U, entropy_input_len1 * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len1); - bool result; - if (!ok) - { - result = false; - } - else - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1 + additional_input_len); - { - uint8_t seed_material[entropy_input_len1 + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len1 * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len1, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA2_256_s) - { - uu____0 = st_s.val.case_SHA2_256_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)33U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)33U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_256(k_0, - k, - (uint32_t)32U, - input, - input_len0); - EverCrypt_HMAC_compute_sha2_256(v, - k_0, - (uint32_t)32U, - v, - (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - result = true; - } - } - } - } - } - } - ok0 = result; - } - } - } - if (!ok0) - { - return false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state x1; - if (st_s.tag == SHA2_256_s) - { - x1 = st_s.val.case_SHA2_256_s; - } - else - { - x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - bool b; - if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - b = false; - } - else - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA2_256_s) - { - scrut = st_s.val.case_SHA2_256_s; 
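/*
 * [Editor's note] st_s is KaRaMeL's C rendering of an F* inductive type: a
 * tag plus a union of per-constructor payloads. The tag has already been
 * dispatched on, so the else branch below is statically unreachable and
 * KRML_EABORT merely gives the C expression a value of the expected type.
 */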
- } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0); - EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = n / (uint32_t)32U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i < max; i++) - { - EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U); - memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)32U < n) - { - uint8_t *block = output1 + max * (uint32_t)32U; - EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U); - memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0); - EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - 
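/*
 * [Editor's illustration -- not part of the deleted file.] The braces just
 * closed end the second of the two HMAC-DRBG update rounds that the
 * generated code inlines at every call site. Factored out, one round looks
 * like the sketch below (drbg_update_round_sha2_256 is a hypothetical name;
 * sep is 0x00 for the first round and 0x01 for the second, which runs only
 * when data_len > 0). A scratch buffer is used here instead of the aliased
 * in-place write the generated code performs.
 */
static void
drbg_update_round_sha2_256(uint8_t *k, uint8_t *v, uint8_t sep, uint8_t *data, uint32_t data_len)
{
  /* input = V || sep || data: 32 + 1 + data_len bytes in total */
  uint32_t input_len = (uint32_t)33U + data_len;
  uint8_t k_new[32U];
  uint8_t *input = (uint8_t *)KRML_HOST_CALLOC(input_len, sizeof (uint8_t));
  memcpy(input, v, (uint32_t)32U * sizeof (uint8_t));
  input[32U] = sep;
  if (data_len != (uint32_t)0U)
  {
    memcpy(input + (uint32_t)33U, data, data_len * sizeof (uint8_t));
  }
  /* K = HMAC(K, V || sep || data), then V = HMAC(K, V) under the new key */
  EverCrypt_HMAC_compute_sha2_256(k_new, k, (uint32_t)32U, input, input_len);
  EverCrypt_HMAC_compute_sha2_256(v, k_new, (uint32_t)32U, v, (uint32_t)32U);
  memcpy(k, k_new, (uint32_t)32U * sizeof (uint8_t));
  KRML_HOST_FREE(input);
}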
} - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - b = true; - } - } - } - } - } - } - } - return true; - } - } - } -} - -bool -EverCrypt_DRBG_generate_sha2_384( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if - ( - additional_input_len - > Hacl_HMAC_DRBG_max_additional_input_length - || n > Hacl_HMAC_DRBG_max_output_length - ) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384); - bool ok0; - if (additional_input_len > Hacl_HMAC_DRBG_max_additional_input_length) - { - ok0 = false; - } - else - { - uint32_t entropy_input_len1 = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1); - { - uint8_t entropy_input[entropy_input_len1]; - memset(entropy_input, 0U, entropy_input_len1 * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len1); - bool result; - if (!ok) - { - result = false; - } - else - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1 + additional_input_len); - { - uint8_t seed_material[entropy_input_len1 + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len1 * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len1, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA2_384_s) - { - uu____0 = st_s.val.case_SHA2_384_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)49U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)49U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_384(k_0, - k, - (uint32_t)48U, - input, - input_len0); - EverCrypt_HMAC_compute_sha2_384(v, - k_0, - (uint32_t)48U, - v, - (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - result = true; - } - } - } - } - } - } - ok0 = result; - } - } - } - if (!ok0) - { - return 
false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state x1; - if (st_s.tag == SHA2_384_s) - { - x1 = st_s.val.case_SHA2_384_s; - } - else - { - x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - bool b; - if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - b = false; - } - else - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA2_384_s) - { - scrut = st_s.val.case_SHA2_384_s; - } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0); - EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = n / (uint32_t)48U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i < max; i++) - { - EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U); - memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)48U < n) - { - uint8_t *block = output1 + max * (uint32_t)48U; - EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U); - memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - 
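/*
 * [Editor's note] This block is the second update pass for SHA2-384:
 * identical to the first except that the separator written below is
 * (uint8_t)1U rather than 0U, at input[48] since HMAC-SHA-384 output is 48
 * bytes. It is reached only when additional input was supplied.
 */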
{ - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0); - EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - b = true; - } - } - } - } - } - } - } - return true; - } - } - } -} - -bool -EverCrypt_DRBG_generate_sha2_512( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - if - ( - additional_input_len - > Hacl_HMAC_DRBG_max_additional_input_length - || n > Hacl_HMAC_DRBG_max_output_length - ) - { - return false; - } - { - uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512); - bool ok0; - if (additional_input_len > Hacl_HMAC_DRBG_max_additional_input_length) - { - ok0 = false; - } - else - { - uint32_t entropy_input_len1 = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512); - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1); - { - uint8_t entropy_input[entropy_input_len1]; - memset(entropy_input, 0U, entropy_input_len1 * sizeof (uint8_t)); - { - bool ok = Lib_RandomBuffer_System_randombytes(entropy_input, entropy_input_len1); - bool result; - if (!ok) - { - result = false; - } - else - { - EverCrypt_DRBG_state_s st_s = *st; - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len1 + additional_input_len); - { - uint8_t seed_material[entropy_input_len1 + additional_input_len]; - memset(seed_material, - 0U, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - memcpy(seed_material, entropy_input, entropy_input_len1 * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len1, - additional_input, - additional_input_len * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - if (st_s.tag == SHA2_512_s) - { - uu____0 = st_s.val.case_SHA2_512_s; - } - else - { - uu____0 = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = uu____0.k; - uint8_t *v = uu____0.v; - uint32_t *ctr = uu____0.reseed_counter; - uint32_t input_len = (uint32_t)65U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)65U + entropy_input_len1 + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len1 + additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - 
seed_material, - (entropy_input_len1 + additional_input_len) * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_512(k_0, - k, - (uint32_t)64U, - input, - input_len0); - EverCrypt_HMAC_compute_sha2_512(v, - k_0, - (uint32_t)64U, - v, - (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - result = true; - } - } - } - } - } - } - ok0 = result; - } - } - } - if (!ok0) - { - return false; - } - { - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state x1; - if (st_s.tag == SHA2_512_s) - { - x1 = st_s.val.case_SHA2_512_s; - } - else - { - x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - bool b; - if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - b = false; - } - else - { - Hacl_HMAC_DRBG_state scrut; - if (st_s.tag == SHA2_512_s) - { - scrut = st_s.val.case_SHA2_512_s; - } - else - { - scrut = - KRML_EABORT(Hacl_HMAC_DRBG_state, - "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = scrut.k; - uint8_t *v = scrut.v; - uint32_t *ctr = scrut.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0); - EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = n / (uint32_t)64U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i < max; i++) - { - EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U); - memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)64U < n) - { - uint8_t *block = output1 + max * (uint32_t)64U; - EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U); - memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - 
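/*
 * [Editor's note] The byte assigned next is HMAC-DRBG's domain-separation
 * constant from NIST SP 800-90A: 0x00 tags the first update round, 0x01 the
 * second. It sits at offset 64 because V, the HMAC-SHA-512 output, fills
 * the first 64 bytes of the buffer, which is also why the additional input
 * is copied in at offset 65 just above.
 */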
input0[64U] = (uint8_t)0U; - EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0); - EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - b = true; - } - } - } - } - } - } - } - return true; - } - } - } -} - -void EverCrypt_DRBG_uninstantiate_sha1(EverCrypt_DRBG_state_s *st) -{ - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state s; - if (st_s.tag == SHA1_s) - { - s = st_s.val.case_SHA1_s; - } - else - { - s = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = s.k; - uint8_t *v = s.v; - uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, (uint32_t)20U * sizeof (k[0U])); - Lib_Memzero0_memzero(v, (uint32_t)20U * sizeof (v[0U])); - ctr[0U] = (uint32_t)0U; - KRML_HOST_FREE(k); - KRML_HOST_FREE(v); - KRML_HOST_FREE(ctr); - KRML_HOST_FREE(st); - } -} - -void EverCrypt_DRBG_uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st) -{ - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state s; - if (st_s.tag == SHA2_256_s) - { - s = st_s.val.case_SHA2_256_s; - } - else - { - s = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = s.k; - uint8_t *v = s.v; - uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, (uint32_t)32U * sizeof (k[0U])); - Lib_Memzero0_memzero(v, (uint32_t)32U * sizeof (v[0U])); - ctr[0U] = (uint32_t)0U; - KRML_HOST_FREE(k); - KRML_HOST_FREE(v); - KRML_HOST_FREE(ctr); - KRML_HOST_FREE(st); - } -} - -void EverCrypt_DRBG_uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st) -{ - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state s; - if (st_s.tag == SHA2_384_s) - { - s = st_s.val.case_SHA2_384_s; - } - else - { - s = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = s.k; - uint8_t *v = s.v; - uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, (uint32_t)48U * sizeof (k[0U])); - Lib_Memzero0_memzero(v, (uint32_t)48U * sizeof (v[0U])); - ctr[0U] = (uint32_t)0U; - KRML_HOST_FREE(k); - KRML_HOST_FREE(v); - KRML_HOST_FREE(ctr); - KRML_HOST_FREE(st); - } -} - -void EverCrypt_DRBG_uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st) -{ - EverCrypt_DRBG_state_s st_s = *st; - Hacl_HMAC_DRBG_state s; - if (st_s.tag == SHA2_512_s) - { - s = st_s.val.case_SHA2_512_s; - } - else - { - s = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)"); - } - { - uint8_t *k = s.k; - uint8_t *v = s.v; - uint32_t *ctr = s.reseed_counter; - Lib_Memzero0_memzero(k, (uint32_t)64U * sizeof (k[0U])); - Lib_Memzero0_memzero(v, (uint32_t)64U * sizeof (v[0U])); - ctr[0U] = (uint32_t)0U; - KRML_HOST_FREE(k); - 
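/*
 * [Editor's note] As in the three uninstantiate variants above, K and V are
 * scrubbed with Lib_Memzero0_memzero and the reseed counter is zeroed
 * before any buffer is freed, so no key material is left behind on the
 * heap.
 */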
KRML_HOST_FREE(v); - KRML_HOST_FREE(ctr); - KRML_HOST_FREE(st); - } -} - -bool -EverCrypt_DRBG_instantiate( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -) -{ - EverCrypt_DRBG_state_s scrut = *st; - if (scrut.tag == SHA1_s) - { - return EverCrypt_DRBG_instantiate_sha1(st, personalization_string, personalization_string_len); - } - if (scrut.tag == SHA2_256_s) - { - return - EverCrypt_DRBG_instantiate_sha2_256(st, - personalization_string, - personalization_string_len); - } - if (scrut.tag == SHA2_384_s) - { - return - EverCrypt_DRBG_instantiate_sha2_384(st, - personalization_string, - personalization_string_len); - } - if (scrut.tag == SHA2_512_s) - { - return - EverCrypt_DRBG_instantiate_sha2_512(st, - personalization_string, - personalization_string_len); - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -bool -EverCrypt_DRBG_reseed( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - EverCrypt_DRBG_state_s scrut = *st; - if (scrut.tag == SHA1_s) - { - return EverCrypt_DRBG_reseed_sha1(st, additional_input, additional_input_len); - } - if (scrut.tag == SHA2_256_s) - { - return EverCrypt_DRBG_reseed_sha2_256(st, additional_input, additional_input_len); - } - if (scrut.tag == SHA2_384_s) - { - return EverCrypt_DRBG_reseed_sha2_384(st, additional_input, additional_input_len); - } - if (scrut.tag == SHA2_512_s) - { - return EverCrypt_DRBG_reseed_sha2_512(st, additional_input, additional_input_len); - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -bool -EverCrypt_DRBG_generate( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -) -{ - EverCrypt_DRBG_state_s scrut = *st; - if (scrut.tag == SHA1_s) - { - return EverCrypt_DRBG_generate_sha1(output, st, n, additional_input, additional_input_len); - } - if (scrut.tag == SHA2_256_s) - { - return EverCrypt_DRBG_generate_sha2_256(output, st, n, additional_input, additional_input_len); - } - if (scrut.tag == SHA2_384_s) - { - return EverCrypt_DRBG_generate_sha2_384(output, st, n, additional_input, additional_input_len); - } - if (scrut.tag == SHA2_512_s) - { - return EverCrypt_DRBG_generate_sha2_512(output, st, n, additional_input, additional_input_len); - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -void EverCrypt_DRBG_uninstantiate(EverCrypt_DRBG_state_s *st) -{ - EverCrypt_DRBG_state_s scrut = *st; - if (scrut.tag == SHA1_s) - { - EverCrypt_DRBG_uninstantiate_sha1(st); - return; - } - if (scrut.tag == SHA2_256_s) - { - EverCrypt_DRBG_uninstantiate_sha2_256(st); - return; - } - if (scrut.tag == SHA2_384_s) - { - EverCrypt_DRBG_uninstantiate_sha2_384(st); - return; - } - if (scrut.tag == SHA2_512_s) - { - EverCrypt_DRBG_uninstantiate_sha2_512(st); - return; - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - diff --git a/dist/c89-compatible/EverCrypt_DRBG.h b/dist/c89-compatible/EverCrypt_DRBG.h deleted file mode 100644 index 11af99fc06..0000000000 --- a/dist/c89-compatible/EverCrypt_DRBG.h +++ /dev/null @@ -1,216 +0,0 
@@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __EverCrypt_DRBG_H -#define __EverCrypt_DRBG_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_RandomBuffer_System.h" -#include "Lib_Memzero0.h" -#include "Hacl_Spec.h" -#include "Hacl_HMAC_DRBG.h" -#include "EverCrypt_HMAC.h" -#include "evercrypt_targetconfig.h" -typedef Spec_Hash_Definitions_hash_alg EverCrypt_DRBG_supported_alg; - -extern uint32_t EverCrypt_DRBG_reseed_interval; - -extern uint32_t EverCrypt_DRBG_max_output_length; - -extern uint32_t EverCrypt_DRBG_max_length; - -extern uint32_t EverCrypt_DRBG_max_personalization_string_length; - -extern uint32_t EverCrypt_DRBG_max_additional_input_length; - -uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a); - -typedef struct EverCrypt_DRBG_state_s_s EverCrypt_DRBG_state_s; - -bool -EverCrypt_DRBG_uu___is_SHA1_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -); - -bool -EverCrypt_DRBG_uu___is_SHA2_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -); - -bool -EverCrypt_DRBG_uu___is_SHA2_384_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -); - -bool -EverCrypt_DRBG_uu___is_SHA2_512_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_DRBG_state_s projectee -); - -EverCrypt_DRBG_state_s *EverCrypt_DRBG_create(Spec_Hash_Definitions_hash_alg a); - -bool -EverCrypt_DRBG_instantiate_sha1( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -); - -bool -EverCrypt_DRBG_instantiate_sha2_256( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -); - -bool -EverCrypt_DRBG_instantiate_sha2_384( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -); - -bool -EverCrypt_DRBG_instantiate_sha2_512( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -); - -bool -EverCrypt_DRBG_reseed_sha1( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool -EverCrypt_DRBG_reseed_sha2_256( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool 
-EverCrypt_DRBG_reseed_sha2_384( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool -EverCrypt_DRBG_reseed_sha2_512( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool -EverCrypt_DRBG_generate_sha1( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool -EverCrypt_DRBG_generate_sha2_256( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool -EverCrypt_DRBG_generate_sha2_384( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool -EverCrypt_DRBG_generate_sha2_512( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -); - -void EverCrypt_DRBG_uninstantiate_sha1(EverCrypt_DRBG_state_s *st); - -void EverCrypt_DRBG_uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st); - -void EverCrypt_DRBG_uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st); - -void EverCrypt_DRBG_uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st); - -bool -EverCrypt_DRBG_instantiate( - EverCrypt_DRBG_state_s *st, - uint8_t *personalization_string, - uint32_t personalization_string_len -); - -bool -EverCrypt_DRBG_reseed( - EverCrypt_DRBG_state_s *st, - uint8_t *additional_input, - uint32_t additional_input_len -); - -bool -EverCrypt_DRBG_generate( - uint8_t *output, - EverCrypt_DRBG_state_s *st, - uint32_t n, - uint8_t *additional_input, - uint32_t additional_input_len -); - -void EverCrypt_DRBG_uninstantiate(EverCrypt_DRBG_state_s *st); - -#if defined(__cplusplus) -} -#endif - -#define __EverCrypt_DRBG_H_DEFINED -#endif diff --git a/dist/c89-compatible/EverCrypt_Ed25519.c b/dist/c89-compatible/EverCrypt_Ed25519.c deleted file mode 100644 index 36f42069b8..0000000000 --- a/dist/c89-compatible/EverCrypt_Ed25519.c +++ /dev/null @@ -1,71 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "EverCrypt_Ed25519.h" - - - -void EverCrypt_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key) -{ - Hacl_Ed25519_secret_to_public(public_key, private_key); -} - -void EverCrypt_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key) -{ - Hacl_Ed25519_expand_keys(expanded_keys, private_key); -} - -void -EverCrypt_Ed25519_sign_expanded( - uint8_t *signature, - uint8_t *expanded_keys, - uint32_t msg_len, - uint8_t *msg -) -{ - Hacl_Ed25519_sign_expanded(signature, expanded_keys, msg_len, msg); -} - -void -EverCrypt_Ed25519_sign( - uint8_t *signature, - uint8_t *private_key, - uint32_t msg_len, - uint8_t *msg -) -{ - Hacl_Ed25519_sign(signature, private_key, msg_len, msg); -} - -bool -EverCrypt_Ed25519_verify( - uint8_t *public_key, - uint32_t msg_len, - uint8_t *msg, - uint8_t *signature -) -{ - return Hacl_Ed25519_verify(public_key, msg_len, msg, signature); -} - diff --git a/dist/c89-compatible/EverCrypt_Ed25519.h b/dist/c89-compatible/EverCrypt_Ed25519.h deleted file mode 100644 index 401edc0b9d..0000000000 --- a/dist/c89-compatible/EverCrypt_Ed25519.h +++ /dev/null @@ -1,73 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __EverCrypt_Ed25519_H -#define __EverCrypt_Ed25519_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Ed25519.h" -#include "evercrypt_targetconfig.h" -void EverCrypt_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key); - -void EverCrypt_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key); - -void -EverCrypt_Ed25519_sign_expanded( - uint8_t *signature, - uint8_t *expanded_keys, - uint32_t msg_len, - uint8_t *msg -); - -void -EverCrypt_Ed25519_sign( - uint8_t *signature, - uint8_t *private_key, - uint32_t msg_len, - uint8_t *msg -); - -bool -EverCrypt_Ed25519_verify( - uint8_t *public_key, - uint32_t msg_len, - uint8_t *msg, - uint8_t *signature -); - -#if defined(__cplusplus) -} -#endif - -#define __EverCrypt_Ed25519_H_DEFINED -#endif diff --git a/dist/c89-compatible/EverCrypt_Error.h b/dist/c89-compatible/EverCrypt_Error.h deleted file mode 100644 index 8fb670e6f5..0000000000 --- a/dist/c89-compatible/EverCrypt_Error.h +++ /dev/null @@ -1,55 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __EverCrypt_Error_H -#define __EverCrypt_Error_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -#define EverCrypt_Error_Success 0 -#define EverCrypt_Error_UnsupportedAlgorithm 1 -#define EverCrypt_Error_InvalidKey 2 -#define EverCrypt_Error_AuthenticationFailure 3 -#define EverCrypt_Error_InvalidIVLength 4 -#define EverCrypt_Error_DecodeError 5 -#define EverCrypt_Error_MaximumLengthExceeded 6 - -typedef uint8_t EverCrypt_Error_error_code; - -#if defined(__cplusplus) -} -#endif - -#define __EverCrypt_Error_H_DEFINED -#endif diff --git a/dist/c89-compatible/EverCrypt_HKDF.c b/dist/c89-compatible/EverCrypt_HKDF.c deleted file mode 100644 index 4d5a4260e4..0000000000 --- a/dist/c89-compatible/EverCrypt_HKDF.c +++ /dev/null @@ -1,549 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "EverCrypt_HKDF.h" - - - -void -EverCrypt_HKDF_expand_sha1( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)20U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -EverCrypt_HKDF_extract_sha1( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - EverCrypt_HMAC_compute_sha1(prk, salt, saltlen, ikm, ikmlen); -} - -void -EverCrypt_HKDF_expand_sha2_256( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)32U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -EverCrypt_HKDF_extract_sha2_256( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - EverCrypt_HMAC_compute_sha2_256(prk, salt, saltlen, ikm, ikmlen); -} - -void -EverCrypt_HKDF_expand_sha2_384( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)48U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, 
(tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -EverCrypt_HKDF_extract_sha2_384( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - EverCrypt_HMAC_compute_sha2_384(prk, salt, saltlen, ikm, ikmlen); -} - -void -EverCrypt_HKDF_expand_sha2_512( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)64U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -EverCrypt_HKDF_extract_sha2_512( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - EverCrypt_HMAC_compute_sha2_512(prk, salt, saltlen, ikm, ikmlen); -} - -void -EverCrypt_HKDF_expand_blake2s( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)32U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, 
text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -EverCrypt_HKDF_extract_blake2s( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - EverCrypt_HMAC_compute_blake2s(prk, salt, saltlen, ikm, ikmlen); -} - -void -EverCrypt_HKDF_expand_blake2b( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)64U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -EverCrypt_HKDF_extract_blake2b( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - EverCrypt_HMAC_compute_blake2b(prk, salt, saltlen, ikm, ikmlen); -} - -void -EverCrypt_HKDF_expand( - Spec_Hash_Definitions_hash_alg a, - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - EverCrypt_HKDF_expand_sha1(okm, prk, prklen, info, infolen, len); - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - EverCrypt_HKDF_expand_sha2_256(okm, prk, prklen, info, infolen, len); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - EverCrypt_HKDF_expand_sha2_384(okm, prk, prklen, info, infolen, len); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - EverCrypt_HKDF_expand_sha2_512(okm, prk, prklen, info, infolen, len); - break; - } - case Spec_Hash_Definitions_Blake2S: - { - EverCrypt_HKDF_expand_blake2s(okm, prk, prklen, info, infolen, len); - break; - } - case Spec_Hash_Definitions_Blake2B: - { - EverCrypt_HKDF_expand_blake2b(okm, prk, prklen, info, infolen, len); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -void -EverCrypt_HKDF_extract( - Spec_Hash_Definitions_hash_alg a, - uint8_t *prk, 
- uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - EverCrypt_HKDF_extract_sha1(prk, salt, saltlen, ikm, ikmlen); - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - EverCrypt_HKDF_extract_sha2_256(prk, salt, saltlen, ikm, ikmlen); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - EverCrypt_HKDF_extract_sha2_384(prk, salt, saltlen, ikm, ikmlen); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - EverCrypt_HKDF_extract_sha2_512(prk, salt, saltlen, ikm, ikmlen); - break; - } - case Spec_Hash_Definitions_Blake2S: - { - EverCrypt_HKDF_extract_blake2s(prk, salt, saltlen, ikm, ikmlen); - break; - } - case Spec_Hash_Definitions_Blake2B: - { - EverCrypt_HKDF_extract_blake2b(prk, salt, saltlen, ikm, ikmlen); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - diff --git a/dist/c89-compatible/EverCrypt_HKDF.h b/dist/c89-compatible/EverCrypt_HKDF.h deleted file mode 100644 index 3d2d873c56..0000000000 --- a/dist/c89-compatible/EverCrypt_HKDF.h +++ /dev/null @@ -1,181 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
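For orientation, each deleted EverCrypt_HKDF_expand_* above is the same RFC 5869 loop specialized to one HMAC: T(1) = HMAC(prk, info || 0x01), T(i+1) = HMAC(prk, T(i) || info || ctr), with the T(i) concatenated into okm. A minimal generic sketch of that loop follows; the hmac_fn callback, the fixed scratch size, and the helper name are illustrative assumptions, not part of the deleted API:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical one-shot HMAC signature, shaped like EverCrypt_HMAC_compute_*. */
    typedef void (*hmac_fn)(uint8_t *dst, uint8_t *key, uint32_t key_len,
                            uint8_t *data, uint32_t data_len);

    /* Scratch layout mirrors the generated code: text = [ tag | info | ctr ]. */
    static void
    hkdf_expand_sketch(hmac_fn hmac, uint32_t tlen, uint8_t *okm, uint8_t *prk,
                       uint32_t prklen, uint8_t *info, uint32_t infolen, uint32_t len)
    {
      uint8_t text[1024]; /* assumption: tlen + infolen + 1 <= 1024; the real code sizes this exactly */
      uint8_t *tag = text;
      uint8_t *ctr = text + tlen + infolen;
      uint32_t n = len / tlen;
      uint32_t i, out_len;
      memcpy(text + tlen, info, infolen * sizeof (uint8_t));
      for (i = 0U; i <= n; i++)
      {
        out_len = i < n ? tlen : len - n * tlen;
        if (out_len == 0U)
          break; /* len was an exact multiple of tlen */
        ctr[0U] = (uint8_t)(i + 1U);
        if (i == 0U)
          hmac(tag, prk, prklen, text + tlen, infolen + 1U); /* T(1): no previous tag */
        else
          hmac(tag, prk, prklen, text, tlen + infolen + 1U); /* T(i+1) over T(i) || info || ctr */
        memcpy(okm + i * tlen, tag, out_len * sizeof (uint8_t));
      }
    }

As in the generated code, tag aliases the front of the buffer that also serves as HMAC input; that is safe because a one-shot HMAC consumes all of its input before writing dst.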
- */
-
-
-#ifndef __EverCrypt_HKDF_H
-#define __EverCrypt_HKDF_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Spec.h"
-#include "EverCrypt_HMAC.h"
-#include "evercrypt_targetconfig.h"
-void
-EverCrypt_HKDF_expand_sha1(
-  uint8_t *okm,
-  uint8_t *prk,
-  uint32_t prklen,
-  uint8_t *info,
-  uint32_t infolen,
-  uint32_t len
-);
-
-void
-EverCrypt_HKDF_extract_sha1(
-  uint8_t *prk,
-  uint8_t *salt,
-  uint32_t saltlen,
-  uint8_t *ikm,
-  uint32_t ikmlen
-);
-
-void
-EverCrypt_HKDF_expand_sha2_256(
-  uint8_t *okm,
-  uint8_t *prk,
-  uint32_t prklen,
-  uint8_t *info,
-  uint32_t infolen,
-  uint32_t len
-);
-
-void
-EverCrypt_HKDF_extract_sha2_256(
-  uint8_t *prk,
-  uint8_t *salt,
-  uint32_t saltlen,
-  uint8_t *ikm,
-  uint32_t ikmlen
-);
-
-void
-EverCrypt_HKDF_expand_sha2_384(
-  uint8_t *okm,
-  uint8_t *prk,
-  uint32_t prklen,
-  uint8_t *info,
-  uint32_t infolen,
-  uint32_t len
-);
-
-void
-EverCrypt_HKDF_extract_sha2_384(
-  uint8_t *prk,
-  uint8_t *salt,
-  uint32_t saltlen,
-  uint8_t *ikm,
-  uint32_t ikmlen
-);
-
-void
-EverCrypt_HKDF_expand_sha2_512(
-  uint8_t *okm,
-  uint8_t *prk,
-  uint32_t prklen,
-  uint8_t *info,
-  uint32_t infolen,
-  uint32_t len
-);
-
-void
-EverCrypt_HKDF_extract_sha2_512(
-  uint8_t *prk,
-  uint8_t *salt,
-  uint32_t saltlen,
-  uint8_t *ikm,
-  uint32_t ikmlen
-);
-
-void
-EverCrypt_HKDF_expand_blake2s(
-  uint8_t *okm,
-  uint8_t *prk,
-  uint32_t prklen,
-  uint8_t *info,
-  uint32_t infolen,
-  uint32_t len
-);
-
-void
-EverCrypt_HKDF_extract_blake2s(
-  uint8_t *prk,
-  uint8_t *salt,
-  uint32_t saltlen,
-  uint8_t *ikm,
-  uint32_t ikmlen
-);
-
-void
-EverCrypt_HKDF_expand_blake2b(
-  uint8_t *okm,
-  uint8_t *prk,
-  uint32_t prklen,
-  uint8_t *info,
-  uint32_t infolen,
-  uint32_t len
-);
-
-void
-EverCrypt_HKDF_extract_blake2b(
-  uint8_t *prk,
-  uint8_t *salt,
-  uint32_t saltlen,
-  uint8_t *ikm,
-  uint32_t ikmlen
-);
-
-void
-EverCrypt_HKDF_expand(
-  Spec_Hash_Definitions_hash_alg a,
-  uint8_t *okm,
-  uint8_t *prk,
-  uint32_t prklen,
-  uint8_t *info,
-  uint32_t infolen,
-  uint32_t len
-);
-
-void
-EverCrypt_HKDF_extract(
-  Spec_Hash_Definitions_hash_alg a,
-  uint8_t *prk,
-  uint8_t *salt,
-  uint32_t saltlen,
-  uint8_t *ikm,
-  uint32_t ikmlen
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __EverCrypt_HKDF_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/EverCrypt_HMAC.c b/dist/c89-compatible/EverCrypt_HMAC.c
deleted file mode 100644
index 0a7beb2129..0000000000
--- a/dist/c89-compatible/EverCrypt_HMAC.c
+++ /dev/null
@@ -1,1239 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "EverCrypt_HMAC.h" - -#include "internal/Hacl_Hash_SHA2.h" -#include "internal/Hacl_Hash_SHA1.h" -#include "internal/Hacl_Hash_Blake2.h" -#include "internal/Hacl_HMAC.h" - -void -EverCrypt_HMAC_compute_sha1( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)64U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)20U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)64U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_SHA1_legacy_hash(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t scrut0[5]; - uint32_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint32_t)0x67452301U; - scrut0[1U] = (uint32_t)0xefcdab89U; - scrut0[2U] = (uint32_t)0x98badcfeU; - scrut0[3U] = (uint32_t)0x10325476U; - scrut0[4U] = (uint32_t)0xc3d2e1f0U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA1_legacy_init(s); - if (data_len == (uint32_t)0U) - { - Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U); - } - else - { - Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)64U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t *full_blocks = data; - Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - Hacl_Hash_SHA1_legacy_update_last(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len, - rem2, - rem_len); - } - } - } - } - Hacl_Hash_Core_SHA1_legacy_finish(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA1_legacy_init(s); - Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U); - block_len = (uint32_t)64U; - n_blocks0 = (uint32_t)20U / block_len; - rem0 = (uint32_t)20U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - 
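/* K___uint32_t_uint32_t below is KaRaMeL's monomorphized struct encoding of an
   F* pair (uint32_t & uint32_t): fst is the number of full blocks handed to
   update_multi, snd the length of the trailing data handed to update_last. */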
K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)20U - n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; - full_blocks0 = hash1; - Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - Hacl_Hash_SHA1_legacy_update_last(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len0, - rem1, - rem_len0); - Hacl_Hash_Core_SHA1_legacy_finish(s, dst); - } - } - } - } - } - } -} - -void -EverCrypt_HMAC_compute_sha2_256( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)64U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)32U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)64U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - EverCrypt_Hash_hash_256(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t scrut0[8]; - uint32_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint32_t)0x6a09e667U; - scrut0[1U] = (uint32_t)0xbb67ae85U; - scrut0[2U] = (uint32_t)0x3c6ef372U; - scrut0[3U] = (uint32_t)0xa54ff53aU; - scrut0[4U] = (uint32_t)0x510e527fU; - scrut0[5U] = (uint32_t)0x9b05688cU; - scrut0[6U] = (uint32_t)0x1f83d9abU; - scrut0[7U] = (uint32_t)0x5be0cd19U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA2_init_256(s); - if (data_len == (uint32_t)0U) - { - EverCrypt_Hash_update_last_256(s, (uint64_t)0U, ipad, (uint32_t)64U); - } - else - { - EverCrypt_Hash_update_multi_256(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)64U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t *full_blocks = data; - EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - EverCrypt_Hash_update_last_256(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len, - rem2, - rem_len); - } - } - } - } - Hacl_Hash_Core_SHA2_finish_256(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA2_init_256(s); - 
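/* Outer HMAC phase for SHA2-256: the inner digest has just been written into
   the ipad buffer (dst1 = hash1 = ipad), the state is re-initialized, and the
   code below absorbs opad-xor-key as one block followed by the inner digest,
   i.e. HMAC(k, m) = H((k' ^ opad) || H((k' ^ ipad) || m)). */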
EverCrypt_Hash_update_multi_256(s, opad, (uint32_t)1U); - block_len = (uint32_t)64U; - n_blocks0 = (uint32_t)32U / block_len; - rem0 = (uint32_t)32U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)32U - n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; - full_blocks0 = hash1; - EverCrypt_Hash_update_multi_256(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - EverCrypt_Hash_update_last_256(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len0, - rem1, - rem_len0); - Hacl_Hash_Core_SHA2_finish_256(s, dst); - } - } - } - } - } - } -} - -void -EverCrypt_HMAC_compute_sha2_384( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)128U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)48U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)128U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_SHA2_hash_384(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint64_t scrut0[8]; - uint64_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint64_t)0xcbbb9d5dc1059ed8U; - scrut0[1U] = (uint64_t)0x629a292a367cd507U; - scrut0[2U] = (uint64_t)0x9159015a3070dd17U; - scrut0[3U] = (uint64_t)0x152fecd8f70e5939U; - scrut0[4U] = (uint64_t)0x67332667ffc00b31U; - scrut0[5U] = (uint64_t)0x8eb44a8768581511U; - scrut0[6U] = (uint64_t)0xdb0c2e0d64f98fa7U; - scrut0[7U] = (uint64_t)0x47b5481dbefa4fa4U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA2_init_384(s); - if (data_len == (uint32_t)0U) - { - Hacl_Hash_SHA2_update_last_384(s, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - ipad, - (uint32_t)128U); - } - else - { - Hacl_Hash_SHA2_update_multi_384(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)128U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t 
*full_blocks = data; - Hacl_Hash_SHA2_update_multi_384(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - Hacl_Hash_SHA2_update_last_384(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - rem2, - rem_len); - } - } - } - } - Hacl_Hash_Core_SHA2_finish_384(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA2_init_384(s); - Hacl_Hash_SHA2_update_multi_384(s, opad, (uint32_t)1U); - block_len = (uint32_t)128U; - n_blocks0 = (uint32_t)48U / block_len; - rem0 = (uint32_t)48U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)48U - n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; - full_blocks0 = hash1; - Hacl_Hash_SHA2_update_multi_384(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - Hacl_Hash_SHA2_update_last_384(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len0)), - rem1, - rem_len0); - Hacl_Hash_Core_SHA2_finish_384(s, dst); - } - } - } - } - } - } -} - -void -EverCrypt_HMAC_compute_sha2_512( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)128U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)64U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)128U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_SHA2_hash_512(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint64_t scrut0[8]; - uint64_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint64_t)0x6a09e667f3bcc908U; - scrut0[1U] = (uint64_t)0xbb67ae8584caa73bU; - scrut0[2U] = (uint64_t)0x3c6ef372fe94f82bU; - scrut0[3U] = (uint64_t)0xa54ff53a5f1d36f1U; - scrut0[4U] = (uint64_t)0x510e527fade682d1U; - scrut0[5U] = (uint64_t)0x9b05688c2b3e6c1fU; - scrut0[6U] = (uint64_t)0x1f83d9abfb41bd6bU; - scrut0[7U] = (uint64_t)0x5be0cd19137e2179U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA2_init_512(s); - if (data_len == (uint32_t)0U) - { - Hacl_Hash_SHA2_update_last_512(s, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - ipad, - (uint32_t)128U); - } - else - { - Hacl_Hash_SHA2_update_multi_512(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)128U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t 
rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t *full_blocks = data; - Hacl_Hash_SHA2_update_multi_512(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - Hacl_Hash_SHA2_update_last_512(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - rem2, - rem_len); - } - } - } - } - Hacl_Hash_Core_SHA2_finish_512(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA2_init_512(s); - Hacl_Hash_SHA2_update_multi_512(s, opad, (uint32_t)1U); - block_len = (uint32_t)128U; - n_blocks0 = (uint32_t)64U / block_len; - rem0 = (uint32_t)64U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)64U - n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; - full_blocks0 = hash1; - Hacl_Hash_SHA2_update_multi_512(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - Hacl_Hash_SHA2_update_last_512(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len0)), - rem1, - rem_len0); - Hacl_Hash_Core_SHA2_finish_512(s, dst); - } - } - } - } - } - } -} - -void -EverCrypt_HMAC_compute_blake2s( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)64U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)32U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)64U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_Blake2_hash_blake2s_32(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - { - uint32_t s0[16U] = { 0U }; - uint32_t *r0 = s0; - uint32_t *r1 = s0 + (uint32_t)4U; - uint32_t *r2 = s0 + (uint32_t)8U; - uint32_t *r3 = s0 + (uint32_t)12U; - uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U]; - uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U]; - uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U]; - uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U]; - uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U]; - uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U]; 
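/* Inlined Blake2s state setup: rows r2/r3 take the IV, and iv0 is XORed with
   the parameter block 0x01010000 ^ (kk << 8) ^ nn, here with key length kk = 0
   (HMAC keys the hash through ipad/opad rather than Blake2's native keying)
   and digest length nn = 32. */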
- uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U]; - uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U]; - uint32_t kk_shift_8; - uint32_t iv0_; - uint64_t es; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = (uint32_t)0U; - iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ (uint32_t)32U)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; - es = (uint64_t)0U; - { - K____uint32_t__uint64_t scrut0; - uint32_t *s; - uint8_t *dst1; - uint64_t ev0; - uint64_t ev10; - uint8_t *hash1; - uint64_t ev; - uint64_t ev11; - uint32_t block_len0; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint64_t ev2; - uint8_t *rem1; - uint64_t ev3; - uint64_t ev1; - scrut0.fst = s0; - scrut0.snd = es; - s = scrut0.fst; - dst1 = ipad; - ev0 = Hacl_Hash_Core_Blake2_init_blake2s_32(s); - if (data_len == (uint32_t)0U) - { - uint64_t - ev12 = - Hacl_Hash_Blake2_update_last_blake2s_32(s, - ev0, - (uint64_t)0U, - ipad, - (uint32_t)64U); - ev10 = ev12; - } - else - { - uint64_t - ev12 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev0, ipad, (uint32_t)1U); - uint32_t block_len = (uint32_t)64U; - uint32_t n_blocks2 = data_len / block_len; - uint32_t rem2 = data_len % block_len; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem2 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem2; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len; - uint8_t *full_blocks = data; - uint64_t - ev20 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev12, full_blocks, n_blocks); - uint8_t *rem = data + full_blocks_len; - uint64_t - ev30 = - Hacl_Hash_Blake2_update_last_blake2s_32(s, - ev20, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len, - rem, - rem_len); - ev10 = ev30; - } - } - Hacl_Hash_Core_Blake2_finish_blake2s_32(s, ev10, dst1); - hash1 = ipad; - ev = Hacl_Hash_Core_Blake2_init_blake2s_32(s); - ev11 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev, opad, (uint32_t)1U); - block_len0 = (uint32_t)64U; - n_blocks0 = (uint32_t)32U / block_len0; - rem0 = (uint32_t)32U % block_len0; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)32U - n_blocks_ * block_len0; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len0; - full_blocks0 = hash1; - ev2 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev11, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - ev3 = - Hacl_Hash_Blake2_update_last_blake2s_32(s, - ev2, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len0, - rem1, - rem_len0); - ev1 = ev3; - Hacl_Hash_Core_Blake2_finish_blake2s_32(s, ev1, dst); - } - } - } - } - } - } - } -} - -void -EverCrypt_HMAC_compute_blake2b( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t 
l = (uint32_t)128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)128U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)64U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)128U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_Blake2_hash_blake2b_32(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - { - uint64_t s0[16U] = { 0U }; - uint64_t *r0 = s0; - uint64_t *r1 = s0 + (uint32_t)4U; - uint64_t *r2 = s0 + (uint32_t)8U; - uint64_t *r3 = s0 + (uint32_t)12U; - uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U]; - uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U]; - uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U]; - uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U]; - uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U]; - uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U]; - uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U]; - uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U]; - uint64_t kk_shift_8; - uint64_t iv0_; - FStar_UInt128_uint128 es; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = (uint64_t)(uint32_t)0U << (uint32_t)8U; - iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)(uint32_t)64U)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; - es = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - { - K____uint64_t__FStar_UInt128_uint128 scrut0; - uint64_t *s; - uint8_t *dst1; - FStar_UInt128_uint128 ev0; - FStar_UInt128_uint128 ev10; - uint8_t *hash1; - FStar_UInt128_uint128 ev; - FStar_UInt128_uint128 ev11; - uint32_t block_len0; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - FStar_UInt128_uint128 ev2; - uint8_t *rem1; - FStar_UInt128_uint128 ev3; - FStar_UInt128_uint128 ev1; - scrut0.fst = s0; - scrut0.snd = es; - s = scrut0.fst; - dst1 = ipad; - ev0 = Hacl_Hash_Core_Blake2_init_blake2b_32(s); - if (data_len == (uint32_t)0U) - { - FStar_UInt128_uint128 - ev12 = - Hacl_Hash_Blake2_update_last_blake2b_32(s, - ev0, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - ipad, - (uint32_t)128U); - ev10 = ev12; - } - else - { - FStar_UInt128_uint128 - ev12 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev0, ipad, (uint32_t)1U); - uint32_t block_len = (uint32_t)128U; - uint32_t n_blocks2 = data_len / block_len; - uint32_t rem2 = data_len % block_len; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem2 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem2; - scrut = lit; - } - 
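/* Full-block/trailing split: when data_len is a non-zero exact multiple of the
   128-byte block size, one block is held back (fst = n_blocks - 1) so that
   update_last always receives a non-empty final chunk; otherwise fst/snd are
   just the quotient and remainder. The same pattern recurs in every
   EverCrypt_HMAC_compute_* above. */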
{ - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len; - uint8_t *full_blocks = data; - FStar_UInt128_uint128 - ev20 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev12, full_blocks, n_blocks); - uint8_t *rem = data + full_blocks_len; - FStar_UInt128_uint128 - ev30 = - Hacl_Hash_Blake2_update_last_blake2b_32(s, - ev20, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - rem, - rem_len); - ev10 = ev30; - } - } - Hacl_Hash_Core_Blake2_finish_blake2b_32(s, ev10, dst1); - hash1 = ipad; - ev = Hacl_Hash_Core_Blake2_init_blake2b_32(s); - ev11 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev, opad, (uint32_t)1U); - block_len0 = (uint32_t)128U; - n_blocks0 = (uint32_t)64U / block_len0; - rem0 = (uint32_t)64U % block_len0; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)64U - n_blocks_ * block_len0; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len0; - full_blocks0 = hash1; - ev2 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev11, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - ev3 = - Hacl_Hash_Blake2_update_last_blake2b_32(s, - ev2, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len0)), - rem1, - rem_len0); - ev1 = ev3; - Hacl_Hash_Core_Blake2_finish_blake2b_32(s, ev1, dst); - } - } - } - } - } - } - } -} - -bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___) -{ - switch (uu___) - { - case Spec_Hash_Definitions_SHA1: - { - return true; - } - case Spec_Hash_Definitions_SHA2_256: - { - return true; - } - case Spec_Hash_Definitions_SHA2_384: - { - return true; - } - case Spec_Hash_Definitions_SHA2_512: - { - return true; - } - case Spec_Hash_Definitions_Blake2S: - { - return true; - } - case Spec_Hash_Definitions_Blake2B: - { - return true; - } - default: - { - return false; - } - } -} - -void -EverCrypt_HMAC_compute( - Spec_Hash_Definitions_hash_alg a, - uint8_t *mac, - uint8_t *key, - uint32_t keylen, - uint8_t *data, - uint32_t datalen -) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - EverCrypt_HMAC_compute_sha1(mac, key, keylen, data, datalen); - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - EverCrypt_HMAC_compute_sha2_256(mac, key, keylen, data, datalen); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - EverCrypt_HMAC_compute_sha2_384(mac, key, keylen, data, datalen); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - EverCrypt_HMAC_compute_sha2_512(mac, key, keylen, data, datalen); - break; - } - case Spec_Hash_Definitions_Blake2S: - { - EverCrypt_HMAC_compute_blake2s(mac, key, keylen, data, datalen); - break; - } - case Spec_Hash_Definitions_Blake2B: - { - EverCrypt_HMAC_compute_blake2b(mac, key, keylen, data, datalen); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - diff --git a/dist/c89-compatible/EverCrypt_HMAC.h b/dist/c89-compatible/EverCrypt_HMAC.h deleted file mode 100644 index 37202635b5..0000000000 --- a/dist/c89-compatible/EverCrypt_HMAC.h +++ /dev/null @@ -1,118 +0,0 @@ -/* MIT 
License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __EverCrypt_HMAC_H
-#define __EverCrypt_HMAC_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Spec.h"
-#include "Hacl_Krmllib.h"
-#include "Hacl_Impl_Blake2_Constants.h"
-#include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_SHA1.h"
-#include "EverCrypt_Hash.h"
-#include "evercrypt_targetconfig.h"
-void
-EverCrypt_HMAC_compute_sha1(
-  uint8_t *dst,
-  uint8_t *key,
-  uint32_t key_len,
-  uint8_t *data,
-  uint32_t data_len
-);
-
-void
-EverCrypt_HMAC_compute_sha2_256(
-  uint8_t *dst,
-  uint8_t *key,
-  uint32_t key_len,
-  uint8_t *data,
-  uint32_t data_len
-);
-
-void
-EverCrypt_HMAC_compute_sha2_384(
-  uint8_t *dst,
-  uint8_t *key,
-  uint32_t key_len,
-  uint8_t *data,
-  uint32_t data_len
-);
-
-void
-EverCrypt_HMAC_compute_sha2_512(
-  uint8_t *dst,
-  uint8_t *key,
-  uint32_t key_len,
-  uint8_t *data,
-  uint32_t data_len
-);
-
-void
-EverCrypt_HMAC_compute_blake2s(
-  uint8_t *dst,
-  uint8_t *key,
-  uint32_t key_len,
-  uint8_t *data,
-  uint32_t data_len
-);
-
-void
-EverCrypt_HMAC_compute_blake2b(
-  uint8_t *dst,
-  uint8_t *key,
-  uint32_t key_len,
-  uint8_t *data,
-  uint32_t data_len
-);
-
-bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___);
-
-typedef Spec_Hash_Definitions_hash_alg EverCrypt_HMAC_supported_alg;
-
-void
-EverCrypt_HMAC_compute(
-  Spec_Hash_Definitions_hash_alg a,
-  uint8_t *mac,
-  uint8_t *key,
-  uint32_t keylen,
-  uint8_t *data,
-  uint32_t datalen
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __EverCrypt_HMAC_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/EverCrypt_Hash.c b/dist/c89-compatible/EverCrypt_Hash.c
deleted file mode 100644
index 4f2a2a9e19..0000000000
--- a/dist/c89-compatible/EverCrypt_Hash.c
+++ /dev/null
@@ -1,2771 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so,
subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "EverCrypt_Hash.h" - -#include "internal/Vale.h" -#include "internal/Hacl_Hash_SHA2.h" -#include "internal/Hacl_Hash_SHA1.h" -#include "internal/Hacl_Hash_MD5.h" -#include "internal/Hacl_Hash_Blake2s_128.h" -#include "internal/Hacl_Hash_Blake2b_256.h" -#include "internal/Hacl_Hash_Blake2.h" - -C_String_t EverCrypt_Hash_string_of_alg(Spec_Hash_Definitions_hash_alg uu___) -{ - switch (uu___) - { - case Spec_Hash_Definitions_MD5: - { - return "MD5"; - } - case Spec_Hash_Definitions_SHA1: - { - return "SHA1"; - } - case Spec_Hash_Definitions_SHA2_224: - { - return "SHA2_224"; - } - case Spec_Hash_Definitions_SHA2_256: - { - return "SHA2_256"; - } - case Spec_Hash_Definitions_SHA2_384: - { - return "SHA2_384"; - } - case Spec_Hash_Definitions_SHA2_512: - { - return "SHA2_512"; - } - case Spec_Hash_Definitions_SHA3_256: - { - return "SHA3_256"; - } - case Spec_Hash_Definitions_Blake2S: - { - return "Blake2S"; - } - case Spec_Hash_Definitions_Blake2B: - { - return "Blake2B"; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -#define MD5_s 0 -#define SHA1_s 1 -#define SHA2_224_s 2 -#define SHA2_256_s 3 -#define SHA2_384_s 4 -#define SHA2_512_s 5 -#define SHA3_256_s 6 -#define Blake2S_s 7 -#define Blake2S_128_s 8 -#define Blake2B_s 9 -#define Blake2B_256_s 10 - -typedef uint8_t state_s_tags; - -typedef struct EverCrypt_Hash_state_s_s -{ - state_s_tags tag; - union { - uint32_t *case_MD5_s; - uint32_t *case_SHA1_s; - uint32_t *case_SHA2_224_s; - uint32_t *case_SHA2_256_s; - uint64_t *case_SHA2_384_s; - uint64_t *case_SHA2_512_s; - uint64_t *case_SHA3_256_s; - uint32_t *case_Blake2S_s; - Lib_IntVector_Intrinsics_vec128 *case_Blake2S_128_s; - uint64_t *case_Blake2B_s; - Lib_IntVector_Intrinsics_vec256 *case_Blake2B_256_s; - } - val; -} -EverCrypt_Hash_state_s; - -bool -EverCrypt_Hash_uu___is_MD5_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == MD5_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_SHA1_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == SHA1_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_SHA2_224_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == SHA2_224_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_SHA2_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == SHA2_256_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_SHA2_384_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == SHA2_384_s) - { - return true; - } - return false; -} - -bool 
-EverCrypt_Hash_uu___is_SHA2_512_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == SHA2_512_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_SHA3_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == SHA3_256_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_Blake2S_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == Blake2S_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_Blake2S_128_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == Blake2S_128_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_Blake2B_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == Blake2B_s) - { - return true; - } - return false; -} - -bool -EverCrypt_Hash_uu___is_Blake2B_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -) -{ - if (projectee.tag == Blake2B_256_s) - { - return true; - } - return false; -} - -Spec_Hash_Definitions_hash_alg EverCrypt_Hash_alg_of_state(EverCrypt_Hash_state_s *s) -{ - EverCrypt_Hash_state_s scrut = *s; - if (scrut.tag == MD5_s) - { - return Spec_Hash_Definitions_MD5; - } - if (scrut.tag == SHA1_s) - { - return Spec_Hash_Definitions_SHA1; - } - if (scrut.tag == SHA2_224_s) - { - return Spec_Hash_Definitions_SHA2_224; - } - if (scrut.tag == SHA2_256_s) - { - return Spec_Hash_Definitions_SHA2_256; - } - if (scrut.tag == SHA2_384_s) - { - return Spec_Hash_Definitions_SHA2_384; - } - if (scrut.tag == SHA2_512_s) - { - return Spec_Hash_Definitions_SHA2_512; - } - if (scrut.tag == SHA3_256_s) - { - return Spec_Hash_Definitions_SHA3_256; - } - if (scrut.tag == Blake2S_s) - { - return Spec_Hash_Definitions_Blake2S; - } - if (scrut.tag == Blake2S_128_s) - { - return Spec_Hash_Definitions_Blake2S; - } - if (scrut.tag == Blake2B_s) - { - return Spec_Hash_Definitions_Blake2B; - } - if (scrut.tag == Blake2B_256_s) - { - return Spec_Hash_Definitions_Blake2B; - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -EverCrypt_Hash_state_s *EverCrypt_Hash_create_in(Spec_Hash_Definitions_hash_alg a) -{ - EverCrypt_Hash_state_s s; - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - EverCrypt_Hash_state_s lit; - lit.tag = MD5_s; - { - uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t)); - lit.val.case_MD5_s = buf; - s = lit; - } - break; - } - case Spec_Hash_Definitions_SHA1: - { - EverCrypt_Hash_state_s lit; - lit.tag = SHA1_s; - { - uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t)); - lit.val.case_SHA1_s = buf; - s = lit; - } - break; - } - case Spec_Hash_Definitions_SHA2_224: - { - EverCrypt_Hash_state_s lit; - lit.tag = SHA2_224_s; - { - uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); - lit.val.case_SHA2_224_s = buf; - s = lit; - } - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - EverCrypt_Hash_state_s lit; - lit.tag = SHA2_256_s; - { - uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); - lit.val.case_SHA2_256_s = buf; - s = lit; - } - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - EverCrypt_Hash_state_s lit; - lit.tag = SHA2_384_s; - { - uint64_t 
*buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t)); - lit.val.case_SHA2_384_s = buf; - s = lit; - } - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - EverCrypt_Hash_state_s lit; - lit.tag = SHA2_512_s; - { - uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t)); - lit.val.case_SHA2_512_s = buf; - s = lit; - } - break; - } - case Spec_Hash_Definitions_SHA3_256: - { - EverCrypt_Hash_state_s lit; - lit.tag = SHA3_256_s; - { - uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t)); - lit.val.case_SHA3_256_s = buf; - s = lit; - } - break; - } - case Spec_Hash_Definitions_Blake2S: - { - bool vec128 = EverCrypt_AutoConfig2_has_vec128(); - #if HACL_CAN_COMPILE_VEC128 - if (vec128) - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2S_128_s; - lit.val.case_Blake2S_128_s = Hacl_Hash_Blake2s_128_malloc_blake2s_128(); - s = lit; - } - else - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2S_s; - { - uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t)); - lit.val.case_Blake2S_s = buf; - s = lit; - } - } - #else - EverCrypt_Hash_state_s lit; - lit.tag = Blake2S_s; - { - uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t)); - lit.val.case_Blake2S_s = buf; - s = lit; - } - #endif - break; - } - case Spec_Hash_Definitions_Blake2B: - { - bool vec256 = EverCrypt_AutoConfig2_has_vec256(); - #if HACL_CAN_COMPILE_VEC256 - if (vec256) - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2B_256_s; - lit.val.case_Blake2B_256_s = Hacl_Hash_Blake2b_256_malloc_blake2b_256(); - s = lit; - } - else - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2B_s; - { - uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t)); - lit.val.case_Blake2B_s = buf; - s = lit; - } - } - #else - EverCrypt_Hash_state_s lit; - lit.tag = Blake2B_s; - { - uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t)); - lit.val.case_Blake2B_s = buf; - s = lit; - } - #endif - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - KRML_CHECK_SIZE(sizeof (EverCrypt_Hash_state_s), (uint32_t)1U); - { - EverCrypt_Hash_state_s - *buf = (EverCrypt_Hash_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_Hash_state_s)); - buf[0U] = s; - return buf; - } -} - -EverCrypt_Hash_state_s *EverCrypt_Hash_create(Spec_Hash_Definitions_hash_alg a) -{ - return EverCrypt_Hash_create_in(a); -} - -void EverCrypt_Hash_init(EverCrypt_Hash_state_s *s) -{ - EverCrypt_Hash_state_s scrut = *s; - if (scrut.tag == MD5_s) - { - uint32_t *p1 = scrut.val.case_MD5_s; - Hacl_Hash_Core_MD5_legacy_init(p1); - return; - } - if (scrut.tag == SHA1_s) - { - uint32_t *p1 = scrut.val.case_SHA1_s; - Hacl_Hash_Core_SHA1_legacy_init(p1); - return; - } - if (scrut.tag == SHA2_224_s) - { - uint32_t *p1 = scrut.val.case_SHA2_224_s; - Hacl_Hash_Core_SHA2_init_224(p1); - return; - } - if (scrut.tag == SHA2_256_s) - { - uint32_t *p1 = scrut.val.case_SHA2_256_s; - Hacl_Hash_Core_SHA2_init_256(p1); - return; - } - if (scrut.tag == SHA2_384_s) - { - uint64_t *p1 = scrut.val.case_SHA2_384_s; - Hacl_Hash_Core_SHA2_init_384(p1); - return; - } - if (scrut.tag == SHA2_512_s) - { - uint64_t *p1 = scrut.val.case_SHA2_512_s; - Hacl_Hash_Core_SHA2_init_512(p1); - return; - } - if (scrut.tag == SHA3_256_s) - { - uint64_t *p1 = scrut.val.case_SHA3_256_s; - memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t)); - return; - } - if (scrut.tag == Blake2S_s) - { - uint32_t *p1 = 
scrut.val.case_Blake2S_s; - uint64_t uu____0 = Hacl_Hash_Core_Blake2_init_blake2s_32(p1); - return; - } - if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p1 = scrut.val.case_Blake2S_128_s; - #if HACL_CAN_COMPILE_VEC128 - uint64_t uu____1 = Hacl_Hash_Blake2s_128_init_blake2s_128(p1); - return; - #else - return; - #endif - } - if (scrut.tag == Blake2B_s) - { - uint64_t *p1 = scrut.val.case_Blake2B_s; - FStar_UInt128_uint128 uu____2 = Hacl_Hash_Core_Blake2_init_blake2b_32(p1); - return; - } - if (scrut.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p1 = scrut.val.case_Blake2B_256_s; - #if HACL_CAN_COMPILE_VEC256 - FStar_UInt128_uint128 uu____3 = Hacl_Hash_Blake2b_256_init_blake2b_256(p1); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -static uint32_t -k224_256[64U] = - { - (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U, - (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U, - (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U, - (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U, - (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU, - (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU, - (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U, - (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U, - (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U, - (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U, - (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U, - (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U, - (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U, - (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U, - (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U, - (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U - }; - -void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n) -{ - bool has_shaext = EverCrypt_AutoConfig2_has_shaext(); - bool has_sse = EverCrypt_AutoConfig2_has_sse(); - #if HACL_CAN_COMPILE_VALE - if (has_shaext && has_sse) - { - uint64_t n1 = (uint64_t)n; - uint64_t scrut = sha256_update(s, blocks, n1, k224_256); - return; - } - #endif - Hacl_Hash_SHA2_update_multi_256(s, blocks, n); -} - -void EverCrypt_Hash_update2(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *block) -{ - EverCrypt_Hash_state_s scrut = *s; - if (scrut.tag == MD5_s) - { - uint32_t *p1 = scrut.val.case_MD5_s; - Hacl_Hash_Core_MD5_legacy_update(p1, block); - return; - } - if (scrut.tag == SHA1_s) - { - uint32_t *p1 = scrut.val.case_SHA1_s; - Hacl_Hash_Core_SHA1_legacy_update(p1, block); - return; - } - if (scrut.tag == SHA2_224_s) - { - uint32_t *p1 = scrut.val.case_SHA2_224_s; - EverCrypt_Hash_update_multi_256(p1, block, (uint32_t)1U); - return; - } - if (scrut.tag == SHA2_256_s) - { - uint32_t *p1 = scrut.val.case_SHA2_256_s; - 
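/* SHA2-224/256 block updates are funneled through EverCrypt_Hash_update_multi_256
   (defined above), which dispatches to the Vale sha256_update assembly when the
   SHAEXT and SSE CPU features are detected and otherwise falls back to the
   portable Hacl_Hash_SHA2_update_multi_256. */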
EverCrypt_Hash_update_multi_256(p1, block, (uint32_t)1U); - return; - } - if (scrut.tag == SHA2_384_s) - { - uint64_t *p1 = scrut.val.case_SHA2_384_s; - Hacl_Hash_Core_SHA2_update_384(p1, block); - return; - } - if (scrut.tag == SHA2_512_s) - { - uint64_t *p1 = scrut.val.case_SHA2_512_s; - Hacl_Hash_Core_SHA2_update_512(p1, block); - return; - } - if (scrut.tag == SHA3_256_s) - { - uint64_t *p1 = scrut.val.case_SHA3_256_s; - Hacl_Impl_SHA3_loadState((uint32_t)136U, block, p1); - Hacl_Impl_SHA3_state_permute(p1); - return; - } - if (scrut.tag == Blake2S_s) - { - uint32_t *p1 = scrut.val.case_Blake2S_s; - uint64_t uu____0 = Hacl_Hash_Core_Blake2_update_blake2s_32(p1, prevlen, block); - return; - } - if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p1 = scrut.val.case_Blake2S_128_s; - #if HACL_CAN_COMPILE_VEC128 - uint64_t uu____1 = Hacl_Hash_Blake2s_128_update_blake2s_128(p1, prevlen, block); - return; - #else - return; - #endif - } - if (scrut.tag == Blake2B_s) - { - uint64_t *p1 = scrut.val.case_Blake2B_s; - FStar_UInt128_uint128 - uu____2 = - Hacl_Hash_Core_Blake2_update_blake2b_32(p1, - FStar_UInt128_uint64_to_uint128(prevlen), - block); - return; - } - if (scrut.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p1 = scrut.val.case_Blake2B_256_s; - #if HACL_CAN_COMPILE_VEC256 - FStar_UInt128_uint128 - uu____3 = - Hacl_Hash_Blake2b_256_update_blake2b_256(p1, - FStar_UInt128_uint64_to_uint128(prevlen), - block); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -KRML_DEPRECATED("Use update2 instead") - -void EverCrypt_Hash_update(EverCrypt_Hash_state_s *s, uint8_t *block) -{ - EverCrypt_Hash_update2(s, (uint64_t)0U, block); -} - -void -EverCrypt_Hash_update_multi2( - EverCrypt_Hash_state_s *s, - uint64_t prevlen, - uint8_t *blocks, - uint32_t len -) -{ - EverCrypt_Hash_state_s scrut = *s; - if (scrut.tag == MD5_s) - { - uint32_t *p1 = scrut.val.case_MD5_s; - uint32_t n = len / (uint32_t)64U; - Hacl_Hash_MD5_legacy_update_multi(p1, blocks, n); - return; - } - if (scrut.tag == SHA1_s) - { - uint32_t *p1 = scrut.val.case_SHA1_s; - uint32_t n = len / (uint32_t)64U; - Hacl_Hash_SHA1_legacy_update_multi(p1, blocks, n); - return; - } - if (scrut.tag == SHA2_224_s) - { - uint32_t *p1 = scrut.val.case_SHA2_224_s; - uint32_t n = len / (uint32_t)64U; - EverCrypt_Hash_update_multi_256(p1, blocks, n); - return; - } - if (scrut.tag == SHA2_256_s) - { - uint32_t *p1 = scrut.val.case_SHA2_256_s; - uint32_t n = len / (uint32_t)64U; - EverCrypt_Hash_update_multi_256(p1, blocks, n); - return; - } - if (scrut.tag == SHA2_384_s) - { - uint64_t *p1 = scrut.val.case_SHA2_384_s; - uint32_t n = len / (uint32_t)128U; - Hacl_Hash_SHA2_update_multi_384(p1, blocks, n); - return; - } - if (scrut.tag == SHA2_512_s) - { - uint64_t *p1 = scrut.val.case_SHA2_512_s; - uint32_t n = len / (uint32_t)128U; - Hacl_Hash_SHA2_update_multi_512(p1, blocks, n); - return; - } - if (scrut.tag == SHA3_256_s) - { - uint64_t *p1 = scrut.val.case_SHA3_256_s; - uint32_t n = len / (uint32_t)136U; - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - uint32_t sz = (uint32_t)136U; - uint8_t *block = blocks + sz * i; - Hacl_Impl_SHA3_loadState((uint32_t)136U, block, p1); - Hacl_Impl_SHA3_state_permute(p1); - } - return; - } - if (scrut.tag == Blake2S_s) - { - uint32_t *p1 = scrut.val.case_Blake2S_s; - uint32_t n = len / (uint32_t)64U; - uint64_t uu____0 = 
Hacl_Hash_Blake2_update_multi_blake2s_32(p1, prevlen, blocks, n); - return; - } - if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p1 = scrut.val.case_Blake2S_128_s; - #if HACL_CAN_COMPILE_VEC128 - uint32_t n = len / (uint32_t)64U; - uint64_t uu____1 = Hacl_Hash_Blake2s_128_update_multi_blake2s_128(p1, prevlen, blocks, n); - return; - #else - return; - #endif - } - if (scrut.tag == Blake2B_s) - { - uint64_t *p1 = scrut.val.case_Blake2B_s; - uint32_t n = len / (uint32_t)128U; - FStar_UInt128_uint128 - uu____2 = - Hacl_Hash_Blake2_update_multi_blake2b_32(p1, - FStar_UInt128_uint64_to_uint128(prevlen), - blocks, - n); - return; - } - if (scrut.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p1 = scrut.val.case_Blake2B_256_s; - #if HACL_CAN_COMPILE_VEC256 - uint32_t n = len / (uint32_t)128U; - FStar_UInt128_uint128 - uu____3 = - Hacl_Hash_Blake2b_256_update_multi_blake2b_256(p1, - FStar_UInt128_uint64_to_uint128(prevlen), - blocks, - n); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -KRML_DEPRECATED("Use update_multi2 instead") - -void EverCrypt_Hash_update_multi(EverCrypt_Hash_state_s *s, uint8_t *blocks, uint32_t len) -{ - EverCrypt_Hash_update_multi2(s, (uint64_t)0U, blocks, len); -} - -void -EverCrypt_Hash_update_last_256( - uint32_t *s, - uint64_t input, - uint8_t *input_len, - uint32_t input_len1 -) -{ - uint32_t blocks_n = input_len1 / (uint32_t)64U; - uint32_t blocks_len = blocks_n * (uint32_t)64U; - uint8_t *blocks = input_len; - uint32_t rest_len = input_len1 - blocks_len; - uint8_t *rest = input_len + blocks_len; - uint64_t total_input_len; - uint32_t pad_len; - uint32_t tmp_len; - EverCrypt_Hash_update_multi_256(s, blocks, blocks_n); - total_input_len = input + (uint64_t)input_len1; - pad_len = - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U - + (uint32_t)8U; - tmp_len = rest_len + pad_len; - { - uint8_t tmp_twoblocks[128U] = { 0U }; - uint8_t *tmp = tmp_twoblocks; - uint8_t *tmp_rest = tmp; - uint8_t *tmp_pad = tmp + rest_len; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - Hacl_Hash_Core_SHA2_pad_256(total_input_len, tmp_pad); - EverCrypt_Hash_update_multi_256(s, tmp, tmp_len / (uint32_t)64U); - } -} - -void -EverCrypt_Hash_update_last2( - EverCrypt_Hash_state_s *s, - uint64_t prev_len, - uint8_t *last, - uint32_t last_len -) -{ - EverCrypt_Hash_state_s scrut = *s; - if (scrut.tag == MD5_s) - { - uint32_t *p1 = scrut.val.case_MD5_s; - Hacl_Hash_MD5_legacy_update_last(p1, prev_len, last, last_len); - return; - } - if (scrut.tag == SHA1_s) - { - uint32_t *p1 = scrut.val.case_SHA1_s; - Hacl_Hash_SHA1_legacy_update_last(p1, prev_len, last, last_len); - return; - } - if (scrut.tag == SHA2_224_s) - { - uint32_t *p1 = scrut.val.case_SHA2_224_s; - EverCrypt_Hash_update_last_256(p1, prev_len, last, last_len); - return; - } - if (scrut.tag == SHA2_256_s) - { - uint32_t *p1 = scrut.val.case_SHA2_256_s; - EverCrypt_Hash_update_last_256(p1, prev_len, last, last_len); - return; - } - if (scrut.tag == SHA2_384_s) - { - uint64_t *p1 = scrut.val.case_SHA2_384_s; - Hacl_Hash_SHA2_update_last_384(p1, FStar_UInt128_uint64_to_uint128(prev_len), last, last_len); - return; - } - if (scrut.tag == SHA2_512_s) - { - uint64_t *p1 = scrut.val.case_SHA2_512_s; - Hacl_Hash_SHA2_update_last_512(p1, 
FStar_UInt128_uint64_to_uint128(prev_len), last, last_len); - return; - } - if (scrut.tag == SHA3_256_s) - { - uint64_t *p1 = scrut.val.case_SHA3_256_s; - if (last_len == (uint32_t)136U) - { - uint8_t *uu____0; - Hacl_Impl_SHA3_loadState((uint32_t)136U, last, p1); - Hacl_Impl_SHA3_state_permute(p1); - uu____0 = last + last_len; - { - uint8_t b[136U] = { 0U }; - memcpy(b, uu____0, (uint32_t)0U * sizeof (uint8_t)); - b[0U] = (uint8_t)0x06U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b, p1); - { - uint8_t b1[136U] = { 0U }; - b1[135U] = (uint8_t)0x80U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b1, p1); - Hacl_Impl_SHA3_state_permute(p1); - Lib_Memzero0_memzero(b1, (uint32_t)136U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)136U * sizeof (b[0U])); - return; - } - } - } - { - uint8_t b[136U] = { 0U }; - memcpy(b, last, last_len * sizeof (uint8_t)); - b[last_len] = (uint8_t)0x06U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b, p1); - { - uint8_t b1[136U] = { 0U }; - b1[135U] = (uint8_t)0x80U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b1, p1); - Hacl_Impl_SHA3_state_permute(p1); - Lib_Memzero0_memzero(b1, (uint32_t)136U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)136U * sizeof (b[0U])); - return; - } - } - } - if (scrut.tag == Blake2S_s) - { - uint32_t *p1 = scrut.val.case_Blake2S_s; - uint64_t x = Hacl_Hash_Blake2_update_last_blake2s_32(p1, prev_len, prev_len, last, last_len); - return; - } - if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p1 = scrut.val.case_Blake2S_128_s; - #if HACL_CAN_COMPILE_VEC128 - uint64_t - x = Hacl_Hash_Blake2s_128_update_last_blake2s_128(p1, prev_len, prev_len, last, last_len); - return; - #else - return; - #endif - } - if (scrut.tag == Blake2B_s) - { - uint64_t *p1 = scrut.val.case_Blake2B_s; - FStar_UInt128_uint128 - x = - Hacl_Hash_Blake2_update_last_blake2b_32(p1, - FStar_UInt128_uint64_to_uint128(prev_len), - FStar_UInt128_uint64_to_uint128(prev_len), - last, - last_len); - return; - } - if (scrut.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p1 = scrut.val.case_Blake2B_256_s; - #if HACL_CAN_COMPILE_VEC256 - FStar_UInt128_uint128 - x = - Hacl_Hash_Blake2b_256_update_last_blake2b_256(p1, - FStar_UInt128_uint64_to_uint128(prev_len), - FStar_UInt128_uint64_to_uint128(prev_len), - last, - last_len); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -KRML_DEPRECATED("Use update_last2 instead") - -void EverCrypt_Hash_update_last(EverCrypt_Hash_state_s *s, uint8_t *last, uint64_t total_len) -{ - Spec_Hash_Definitions_hash_alg a = EverCrypt_Hash_alg_of_state(s); - uint32_t sw; - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - sw = (uint32_t)64U; - break; - } - case Spec_Hash_Definitions_SHA1: - { - sw = (uint32_t)64U; - break; - } - case Spec_Hash_Definitions_SHA2_224: - { - sw = (uint32_t)64U; - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - sw = (uint32_t)64U; - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - sw = (uint32_t)128U; - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - sw = (uint32_t)128U; - break; - } - case Spec_Hash_Definitions_SHA3_256: - { - sw = (uint32_t)136U; - break; - } - case Spec_Hash_Definitions_Blake2S: - { - sw = (uint32_t)64U; - break; - } - case Spec_Hash_Definitions_Blake2B: - { - sw = (uint32_t)128U; - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, 
__LINE__); - KRML_HOST_EXIT(253U); - } - } - { - uint64_t last_len = total_len % (uint64_t)sw; - uint64_t prev_len = total_len - last_len; - EverCrypt_Hash_update_last2(s, prev_len, last, (uint32_t)last_len); - } -} - -void EverCrypt_Hash_finish(EverCrypt_Hash_state_s *s, uint8_t *dst) -{ - EverCrypt_Hash_state_s scrut = *s; - if (scrut.tag == MD5_s) - { - uint32_t *p1 = scrut.val.case_MD5_s; - Hacl_Hash_Core_MD5_legacy_finish(p1, dst); - return; - } - if (scrut.tag == SHA1_s) - { - uint32_t *p1 = scrut.val.case_SHA1_s; - Hacl_Hash_Core_SHA1_legacy_finish(p1, dst); - return; - } - if (scrut.tag == SHA2_224_s) - { - uint32_t *p1 = scrut.val.case_SHA2_224_s; - Hacl_Hash_Core_SHA2_finish_224(p1, dst); - return; - } - if (scrut.tag == SHA2_256_s) - { - uint32_t *p1 = scrut.val.case_SHA2_256_s; - Hacl_Hash_Core_SHA2_finish_256(p1, dst); - return; - } - if (scrut.tag == SHA2_384_s) - { - uint64_t *p1 = scrut.val.case_SHA2_384_s; - Hacl_Hash_Core_SHA2_finish_384(p1, dst); - return; - } - if (scrut.tag == SHA2_512_s) - { - uint64_t *p1 = scrut.val.case_SHA2_512_s; - Hacl_Hash_Core_SHA2_finish_512(p1, dst); - return; - } - if (scrut.tag == SHA3_256_s) - { - uint64_t *p1 = scrut.val.case_SHA3_256_s; - Hacl_Impl_SHA3_squeeze(p1, (uint32_t)136U, (uint32_t)32U, dst); - return; - } - if (scrut.tag == Blake2S_s) - { - uint32_t *p1 = scrut.val.case_Blake2S_s; - Hacl_Hash_Core_Blake2_finish_blake2s_32(p1, (uint64_t)0U, dst); - return; - } - if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p1 = scrut.val.case_Blake2S_128_s; - #if HACL_CAN_COMPILE_VEC128 - Hacl_Hash_Blake2s_128_finish_blake2s_128(p1, (uint64_t)0U, dst); - return; - #else - return; - #endif - } - if (scrut.tag == Blake2B_s) - { - uint64_t *p1 = scrut.val.case_Blake2B_s; - Hacl_Hash_Core_Blake2_finish_blake2b_32(p1, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - dst); - return; - } - if (scrut.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p1 = scrut.val.case_Blake2B_256_s; - #if HACL_CAN_COMPILE_VEC256 - Hacl_Hash_Blake2b_256_finish_blake2b_256(p1, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - dst); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -void EverCrypt_Hash_free(EverCrypt_Hash_state_s *s) -{ - EverCrypt_Hash_state_s scrut = *s; - if (scrut.tag == MD5_s) - { - uint32_t *p1 = scrut.val.case_MD5_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == SHA1_s) - { - uint32_t *p1 = scrut.val.case_SHA1_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == SHA2_224_s) - { - uint32_t *p1 = scrut.val.case_SHA2_224_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == SHA2_256_s) - { - uint32_t *p1 = scrut.val.case_SHA2_256_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == SHA2_384_s) - { - uint64_t *p1 = scrut.val.case_SHA2_384_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == SHA2_512_s) - { - uint64_t *p1 = scrut.val.case_SHA2_512_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == SHA3_256_s) - { - uint64_t *p1 = scrut.val.case_SHA3_256_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == Blake2S_s) - { - uint32_t *p1 = scrut.val.case_Blake2S_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p1 = scrut.val.case_Blake2S_128_s; - KRML_ALIGNED_FREE(p1); - } - else if (scrut.tag == Blake2B_s) - { - uint64_t *p1 = scrut.val.case_Blake2B_s; - KRML_HOST_FREE(p1); - } - else if (scrut.tag == 
Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p1 = scrut.val.case_Blake2B_256_s; - KRML_ALIGNED_FREE(p1); - } - else - { - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); - } - KRML_HOST_FREE(s); -} - -void EverCrypt_Hash_copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst) -{ - EverCrypt_Hash_state_s scrut0 = *s_src; - if (scrut0.tag == MD5_s) - { - uint32_t *p_src = scrut0.val.case_MD5_s; - EverCrypt_Hash_state_s x1 = *s_dst; - uint32_t *p_dst; - if (x1.tag == MD5_s) - { - p_dst = x1.val.case_MD5_s; - } - else - { - p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)"); - } - memcpy(p_dst, p_src, (uint32_t)4U * sizeof (uint32_t)); - return; - } - if (scrut0.tag == SHA1_s) - { - uint32_t *p_src = scrut0.val.case_SHA1_s; - EverCrypt_Hash_state_s x1 = *s_dst; - uint32_t *p_dst; - if (x1.tag == SHA1_s) - { - p_dst = x1.val.case_SHA1_s; - } - else - { - p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)"); - } - memcpy(p_dst, p_src, (uint32_t)5U * sizeof (uint32_t)); - return; - } - if (scrut0.tag == SHA2_224_s) - { - uint32_t *p_src = scrut0.val.case_SHA2_224_s; - EverCrypt_Hash_state_s x1 = *s_dst; - uint32_t *p_dst; - if (x1.tag == SHA2_224_s) - { - p_dst = x1.val.case_SHA2_224_s; - } - else - { - p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)"); - } - memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t)); - return; - } - if (scrut0.tag == SHA2_256_s) - { - uint32_t *p_src = scrut0.val.case_SHA2_256_s; - EverCrypt_Hash_state_s x1 = *s_dst; - uint32_t *p_dst; - if (x1.tag == SHA2_256_s) - { - p_dst = x1.val.case_SHA2_256_s; - } - else - { - p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)"); - } - memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t)); - return; - } - if (scrut0.tag == SHA2_384_s) - { - uint64_t *p_src = scrut0.val.case_SHA2_384_s; - EverCrypt_Hash_state_s x1 = *s_dst; - uint64_t *p_dst; - if (x1.tag == SHA2_384_s) - { - p_dst = x1.val.case_SHA2_384_s; - } - else - { - p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)"); - } - memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t)); - return; - } - if (scrut0.tag == SHA2_512_s) - { - uint64_t *p_src = scrut0.val.case_SHA2_512_s; - EverCrypt_Hash_state_s x1 = *s_dst; - uint64_t *p_dst; - if (x1.tag == SHA2_512_s) - { - p_dst = x1.val.case_SHA2_512_s; - } - else - { - p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)"); - } - memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t)); - return; - } - if (scrut0.tag == SHA3_256_s) - { - uint64_t *p_src = scrut0.val.case_SHA3_256_s; - EverCrypt_Hash_state_s x1 = *s_dst; - uint64_t *p_dst; - if (x1.tag == SHA3_256_s) - { - p_dst = x1.val.case_SHA3_256_s; - } - else - { - p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)"); - } - memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t)); - return; - } - if (scrut0.tag == Blake2S_s) - { - uint32_t *p_src = scrut0.val.case_Blake2S_s; - EverCrypt_Hash_state_s scrut = *s_dst; - if (scrut.tag == Blake2S_s) - { - uint32_t *p_dst = scrut.val.case_Blake2S_s; - memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint32_t)); - return; - } - if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.val.case_Blake2S_128_s; - #if HACL_CAN_COMPILE_VEC128 - 
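EverCrypt_Hash_copy above also handles mixed representations: copying a scalar Blake2S state into a vectorized one goes through Hacl_Blake2s_128_load_state128s_from_state32 rather than memcpy, since the 4 x vec128 row layout differs from the 16 x uint32_t scalar layout. The tag/val dispatch it relies on is the KaRaMeL compilation of an F* inductive type; structurally it is a tagged union, sketched here with only a few representative cases and illustrative names (the real definition lives in the deleted EverCrypt_Hash.c):

    typedef enum { MD5_tag, SHA2_256_tag, Blake2S_tag, Blake2S_128_tag } state_tag;

    typedef struct
    {
      state_tag tag;
      union
      {
        uint32_t *case_MD5;             /* 4 x uint32_t  */
        uint32_t *case_SHA2_256;        /* 8 x uint32_t  */
        uint32_t *case_Blake2S;         /* 16 x uint32_t */
        Lib_IntVector_Intrinsics_vec128 *case_Blake2S_128; /* 4 vector rows */
      } val;
    } state_sketch;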
Hacl_Blake2s_128_load_state128s_from_state32(p_dst, p_src); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); - } - if (scrut0.tag == Blake2B_s) - { - uint64_t *p_src = scrut0.val.case_Blake2B_s; - EverCrypt_Hash_state_s scrut = *s_dst; - if (scrut.tag == Blake2B_s) - { - uint64_t *p_dst = scrut.val.case_Blake2B_s; - memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint64_t)); - return; - } - if (scrut.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.val.case_Blake2B_256_s; - #if HACL_CAN_COMPILE_VEC256 - Hacl_Blake2b_256_load_state256b_from_state32(p_dst, p_src); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); - } - if (scrut0.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p_src = scrut0.val.case_Blake2S_128_s; - EverCrypt_Hash_state_s scrut = *s_dst; - if (scrut.tag == Blake2S_128_s) - { - Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.val.case_Blake2S_128_s; - memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128)); - return; - } - if (scrut.tag == Blake2S_s) - { - uint32_t *p_dst = scrut.val.case_Blake2S_s; - #if HACL_CAN_COMPILE_VEC128 - Hacl_Blake2s_128_store_state128s_to_state32(p_dst, p_src); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); - } - if (scrut0.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p_src = scrut0.val.case_Blake2B_256_s; - EverCrypt_Hash_state_s scrut = *s_dst; - if (scrut.tag == Blake2B_256_s) - { - Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.val.case_Blake2B_256_s; - memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - return; - } - if (scrut.tag == Blake2B_s) - { - uint64_t *p_dst = scrut.val.case_Blake2B_s; - #if HACL_CAN_COMPILE_VEC256 - Hacl_Blake2b_256_store_state256b_to_state32(p_dst, p_src); - return; - #else - return; - #endif - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); - } - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); -} - -void EverCrypt_Hash_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint32_t - scrut[8U] = - { - (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU, - (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U - }; - uint32_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)64U; - uint32_t blocks_n1; - if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - EverCrypt_Hash_update_multi_256(s, blocks, blocks_n); - EverCrypt_Hash_update_last_256(s, 
(uint64_t)blocks_len, rest, rest_len); - Hacl_Hash_Core_SHA2_finish_256(s, dst); - } -} - -void EverCrypt_Hash_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint32_t - scrut[8U] = - { - (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U, - (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U - }; - uint32_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)64U; - uint32_t blocks_n1; - if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - EverCrypt_Hash_update_multi_256(s, blocks, blocks_n); - EverCrypt_Hash_update_last_256(s, (uint64_t)blocks_len, rest, rest_len); - Hacl_Hash_Core_SHA2_finish_224(s, dst); - } -} - -void -EverCrypt_Hash_hash( - Spec_Hash_Definitions_hash_alg a, - uint8_t *dst, - uint8_t *input, - uint32_t len -) -{ - bool vec128; - bool vec256; - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - Hacl_Hash_MD5_legacy_hash(input, len, dst); - break; - } - case Spec_Hash_Definitions_SHA1: - { - Hacl_Hash_SHA1_legacy_hash(input, len, dst); - break; - } - case Spec_Hash_Definitions_SHA2_224: - { - EverCrypt_Hash_hash_224(input, len, dst); - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - EverCrypt_Hash_hash_256(input, len, dst); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - Hacl_Hash_SHA2_hash_384(input, len, dst); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - Hacl_Hash_SHA2_hash_512(input, len, dst); - break; - } - case Spec_Hash_Definitions_SHA3_256: - { - Hacl_SHA3_sha3_256(len, input, dst); - break; - } - case Spec_Hash_Definitions_Blake2S: - { - vec128 = EverCrypt_AutoConfig2_has_vec128(); - #if HACL_CAN_COMPILE_VEC128 - if (vec128) - { - Hacl_Hash_Blake2s_128_hash_blake2s_128(input, len, dst); - return; - } - #endif - Hacl_Hash_Blake2_hash_blake2s_32(input, len, dst); - break; - } - case Spec_Hash_Definitions_Blake2B: - { - vec256 = EverCrypt_AutoConfig2_has_vec256(); - #if HACL_CAN_COMPILE_VEC256 - if (vec256) - { - Hacl_Hash_Blake2b_256_hash_blake2b_256(input, len, dst); - return; - } - #endif - Hacl_Hash_Blake2_hash_blake2b_32(input, len, dst); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -uint32_t EverCrypt_Hash_Incremental_hash_len(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint32_t)16U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)20U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint32_t)28U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)48U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint32_t)64U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - 
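EverCrypt_Hash_hash above is the agile one-shot entry point: it dispatches on the algorithm tag and, for the Blake2 algorithms, additionally on runtime vector support. Note also the blocks_n1 adjustment in hash_256/hash_224: when input_len is a nonzero multiple of 64, one full block is held back so that update_last_256 always receives a non-empty remainder to pad. A usage sketch (note that dst precedes input in the argument order):

    /* Digest must hold EverCrypt_Hash_Incremental_hash_len(a) bytes;
       32 for SHA2-256. */
    static void sha256_oneshot(uint8_t *digest32, uint8_t *msg, uint32_t msg_len)
    {
      EverCrypt_Hash_hash(Spec_Hash_Definitions_SHA2_256, digest32, msg, msg_len);
    }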
KRML_HOST_EXIT(253U); - } - } -} - -uint32_t EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)128U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)128U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint32_t)136U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint32_t)128U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ -*EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_hash_alg a) -{ - KRML_CHECK_SIZE(sizeof (uint8_t), EverCrypt_Hash_Incremental_block_len(a)); - { - uint8_t - *buf = (uint8_t *)KRML_HOST_CALLOC(EverCrypt_Hash_Incremental_block_len(a), sizeof (uint8_t)); - EverCrypt_Hash_state_s *block_state = EverCrypt_Hash_create_in(a); - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____), - (uint32_t)1U); - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ - *p = - (Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ - )); - p[0U] = s; - EverCrypt_Hash_init(block_state); - return p; - } - } -} - -void -EverCrypt_Hash_Incremental_init(Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *s; - uint8_t *buf = scrut.buf; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - Spec_Hash_Definitions_hash_alg i = EverCrypt_Hash_alg_of_state(block_state); - EverCrypt_Hash_init(block_state); - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -uint64_t EverCrypt_Hash_Incremental_max_input_len64(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint64_t)2305843009213693951U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint64_t)2305843009213693951U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint64_t)2305843009213693951U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint64_t)2305843009213693951U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint64_t)18446744073709551615U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint64_t)18446744073709551615U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint64_t)18446744073709551615U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint64_t)18446744073709551615U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint64_t)18446744073709551615U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -EverCrypt_Error_error_code -EverCrypt_Hash_Incremental_update( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ 
*s, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut0 = *s; - EverCrypt_Hash_state_s *block_state0 = scrut0.block_state; - Spec_Hash_Definitions_hash_alg alg = EverCrypt_Hash_alg_of_state(block_state0); - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *s; - uint64_t uu____0 = scrut.total_len; - if ((uint64_t)len <= EverCrypt_Hash_Incremental_max_input_len64(alg) - uu____0) - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ s1 = *s; - EverCrypt_Hash_state_s *block_state = s1.block_state; - uint64_t total_len = s1.total_len; - Spec_Hash_Definitions_hash_alg i1 = EverCrypt_Hash_alg_of_state(block_state); - uint32_t sz; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i1) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - sz = EverCrypt_Hash_Incremental_block_len(i1); - } - else - { - sz = (uint32_t)(total_len % (uint64_t)EverCrypt_Hash_Incremental_block_len(i1)); - } - if (len <= EverCrypt_Hash_Incremental_block_len(i1) - sz) - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ s2 = *s; - EverCrypt_Hash_state_s *block_state1 = s2.block_state; - uint8_t *buf = s2.buf; - uint64_t total_len1 = s2.total_len; - Spec_Hash_Definitions_hash_alg i2 = EverCrypt_Hash_alg_of_state(block_state1); - uint32_t sz1; - if - ( - total_len1 - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = EverCrypt_Hash_Incremental_block_len(i2); - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2)); - } - { - uint8_t *buf2 = buf + sz1; - memcpy(buf2, data, len * sizeof (uint8_t)); - { - uint64_t total_len2 = total_len1 + (uint64_t)len; - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *s = lit; - } - } - } - else if (sz == (uint32_t)0U) - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ s2 = *s; - EverCrypt_Hash_state_s *block_state1 = s2.block_state; - uint8_t *buf = s2.buf; - uint64_t total_len1 = s2.total_len; - Spec_Hash_Definitions_hash_alg i2 = EverCrypt_Hash_alg_of_state(block_state1); - uint32_t sz1; - if - ( - total_len1 - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = EverCrypt_Hash_Incremental_block_len(i2); - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2)); - } - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - EverCrypt_Hash_update_multi2(block_state1, - prevlen, - buf, - EverCrypt_Hash_Incremental_block_len(i2)); - } - { - uint32_t ite; - if - ( - (uint64_t)len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2) - == (uint64_t)0U - && (uint64_t)len > (uint64_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(i2); - } - else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2)); - } - { - uint32_t n_blocks = (len - ite) / EverCrypt_Hash_Incremental_block_len(i2); - uint32_t data1_len = n_blocks * EverCrypt_Hash_Incremental_block_len(i2); - uint32_t data2_len = len - data1_len; - uint8_t *data1 = data; - uint8_t *data2 = data + data1_len; - EverCrypt_Hash_update_multi2(block_state1, total_len1, data1, data1_len); - { - uint8_t *dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ lit; 
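The branching above implements a standard streaming buffer in three cases: the new data fits next to the buffered bytes and is appended; the buffer sits at a block boundary, so whole blocks of input are hashed in place and the tail is buffered; or the buffer is topped up to a boundary first and the remainder handled as in the second case. One refinement matters for correctness: a buffer holding exactly one full block reports sz as block_len rather than 0, keeping that block buffered so update_last always has data to pad at finish time. A simplified equivalent that drops this hold-back refinement and the prevlen bookkeeping (illustrative only, not the generated code):

    #include <string.h>

    static void stream_update_sketch(uint8_t *buf, uint32_t *fill,
                                     uint32_t block_len,
                                     uint8_t *data, uint32_t len,
                                     void (*update_multi)(uint8_t *, uint32_t))
    {
      if (*fill > 0U)                   /* top up a partial block first */
      {
        uint32_t take = block_len - *fill;
        if (take > len) take = len;
        memcpy(buf + *fill, data, take);
        *fill += take; data += take; len -= take;
        if (*fill == block_len) { update_multi(buf, 1U); *fill = 0U; }
      }
      if (len >= block_len)             /* hash whole blocks in place */
      {
        uint32_t n = len / block_len;
        update_multi(data, n);
        data += n * block_len; len -= n * block_len;
      }
      memcpy(buf + *fill, data, len);   /* buffer the tail */
      *fill += len;
    }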
- lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *s = lit; - } - } - } - } - } - else - { - uint32_t diff = EverCrypt_Hash_Incremental_block_len(i1) - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ s2 = *s; - EverCrypt_Hash_state_s *block_state10 = s2.block_state; - uint8_t *buf0 = s2.buf; - uint64_t total_len10 = s2.total_len; - Spec_Hash_Definitions_hash_alg i20 = EverCrypt_Hash_alg_of_state(block_state10); - uint32_t sz10; - if - ( - total_len10 - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i20) - == (uint64_t)0U - && total_len10 > (uint64_t)0U - ) - { - sz10 = EverCrypt_Hash_Incremental_block_len(i20); - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)EverCrypt_Hash_Incremental_block_len(i20)); - } - { - uint8_t *buf2 = buf0 + sz10; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - { - uint64_t total_len2 = total_len10 + (uint64_t)diff; - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ lit; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *s = lit; - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ s20 = *s; - EverCrypt_Hash_state_s *block_state1 = s20.block_state; - uint8_t *buf = s20.buf; - uint64_t total_len1 = s20.total_len; - Spec_Hash_Definitions_hash_alg i2 = EverCrypt_Hash_alg_of_state(block_state1); - uint32_t sz1; - if - ( - total_len1 - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = EverCrypt_Hash_Incremental_block_len(i2); - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2)); - } - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - EverCrypt_Hash_update_multi2(block_state1, - prevlen, - buf, - EverCrypt_Hash_Incremental_block_len(i2)); - } - { - uint32_t ite; - if - ( - (uint64_t)(len - diff) - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2) - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(i2); - } - else - { - ite = - (uint32_t)((uint64_t)(len - diff) - % (uint64_t)EverCrypt_Hash_Incremental_block_len(i2)); - } - { - uint32_t n_blocks = (len - diff - ite) / EverCrypt_Hash_Incremental_block_len(i2); - uint32_t data1_len = n_blocks * EverCrypt_Hash_Incremental_block_len(i2); - uint32_t data2_len = len - diff - data1_len; - uint8_t *data11 = data2; - uint8_t *data21 = data2 + data1_len; - EverCrypt_Hash_update_multi2(block_state1, total_len1, data11, data1_len); - { - uint8_t *dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *s = lit0; - } - } - } - } - } - } - } - } - return EverCrypt_Error_Success; - } - return EverCrypt_Error_MaximumLengthExceeded; -} - -void -EverCrypt_Hash_Incremental_finish_md5( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_MD5) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = 
EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_MD5); - } - else - { - r = - (uint32_t)(total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_MD5)); - } - { - uint8_t *buf_1 = buf_; - EverCrypt_Hash_state_s s; - s.tag = MD5_s; - { - uint32_t buf[4U] = { 0U }; - s.val.case_MD5_s = buf; - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_MD5) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_MD5); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_MD5); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } - } -} - -void -EverCrypt_Hash_Incremental_finish_sha1( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA1) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA1); - } - else - { - r = - (uint32_t)(total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA1)); - } - { - uint8_t *buf_1 = buf_; - EverCrypt_Hash_state_s s; - s.tag = SHA1_s; - { - uint32_t buf[5U] = { 0U }; - s.val.case_SHA1_s = buf; - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA1) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA1); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA1); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } - } -} - -void -EverCrypt_Hash_Incremental_finish_sha224( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_224) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_224); - } - else - { - r = - (uint32_t)(total_len - % 
(uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_224)); - } - { - uint8_t *buf_1 = buf_; - EverCrypt_Hash_state_s s; - s.tag = SHA2_224_s; - { - uint32_t buf[8U] = { 0U }; - s.val.case_SHA2_224_s = buf; - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_224) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_224); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_224); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } - } -} - -void -EverCrypt_Hash_Incremental_finish_sha256( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_256) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_256); - } - else - { - r = - (uint32_t)(total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_256)); - } - { - uint8_t *buf_1 = buf_; - EverCrypt_Hash_state_s s; - s.tag = SHA2_256_s; - { - uint32_t buf[8U] = { 0U }; - s.val.case_SHA2_256_s = buf; - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_256) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_256); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_256); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } - } -} - -void -EverCrypt_Hash_Incremental_finish_sha3_256( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA3_256) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA3_256); - } - else - { - r = - (uint32_t)(total_len - % 
(uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA3_256)); - } - { - uint8_t *buf_1 = buf_; - EverCrypt_Hash_state_s s; - s.tag = SHA3_256_s; - { - uint64_t buf[25U] = { 0U }; - s.val.case_SHA3_256_s = buf; - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA3_256) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA3_256); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA3_256); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } - } -} - -void -EverCrypt_Hash_Incremental_finish_sha384( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_384) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_384); - } - else - { - r = - (uint32_t)(total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_384)); - } - { - uint8_t *buf_1 = buf_; - EverCrypt_Hash_state_s s; - s.tag = SHA2_384_s; - { - uint64_t buf[8U] = { 0U }; - s.val.case_SHA2_384_s = buf; - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_384) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_384); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_384); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } - } -} - -void -EverCrypt_Hash_Incremental_finish_sha512( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_512) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_512); - } - else - { - r = - (uint32_t)(total_len - % 
(uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_512)); - } - { - uint8_t *buf_1 = buf_; - EverCrypt_Hash_state_s s; - s.tag = SHA2_512_s; - { - uint64_t buf[8U] = { 0U }; - s.val.case_SHA2_512_s = buf; - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_512) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_512); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_SHA2_512); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } - } -} - -void -EverCrypt_Hash_Incremental_finish_blake2s( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2S) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2S); - } - else - { - r = - (uint32_t)(total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2S)); - } - { - uint8_t *buf_1 = buf_; - bool vec128 = EverCrypt_AutoConfig2_has_vec128(); - EverCrypt_Hash_state_s s; - #if HACL_CAN_COMPILE_VEC128 - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 buf0[4U] KRML_POST_ALIGN(16) = { 0U }; - uint32_t buf[16U] = { 0U }; - if (vec128) - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2S_128_s; - lit.val.case_Blake2S_128_s = buf0; - s = lit; - } - else - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2S_s; - lit.val.case_Blake2S_s = buf; - s = lit; - } - #else - EverCrypt_Hash_state_s lit; - lit.tag = Blake2S_s; - { - uint32_t buf[16U] = { 0U }; - lit.val.case_Blake2S_s = buf; - s = lit; - } - #endif - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2S) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2S); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2S); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } -} - -void -EverCrypt_Hash_Incremental_finish_blake2b( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ 
scrut = *p; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2B) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2B); - } - else - { - r = - (uint32_t)(total_len - % (uint64_t)EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2B)); - } - { - uint8_t *buf_1 = buf_; - bool vec256 = EverCrypt_AutoConfig2_has_vec256(); - EverCrypt_Hash_state_s s; - #if HACL_CAN_COMPILE_VEC256 - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 buf0[4U] KRML_POST_ALIGN(32) = { 0U }; - uint64_t buf[16U] = { 0U }; - if (vec256) - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2B_256_s; - lit.val.case_Blake2B_256_s = buf0; - s = lit; - } - else - { - EverCrypt_Hash_state_s lit; - lit.tag = Blake2B_s; - lit.val.case_Blake2B_s = buf; - s = lit; - } - #else - EverCrypt_Hash_state_s lit; - lit.tag = Blake2B_s; - { - uint64_t buf[16U] = { 0U }; - lit.val.case_Blake2B_s = buf; - s = lit; - } - #endif - { - EverCrypt_Hash_state_s tmp_block_state = s; - uint64_t prev_len; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - EverCrypt_Hash_copy(block_state, &tmp_block_state); - prev_len = total_len - (uint64_t)r; - if - ( - r - % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2B) - == (uint32_t)0U - && r > (uint32_t)0U - ) - { - ite = EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2B); - } - else - { - ite = r % EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_Blake2B); - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - EverCrypt_Hash_update_multi2(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - EverCrypt_Hash_update_last2(&tmp_block_state, prev_len_last, buf_last, r); - EverCrypt_Hash_finish(&tmp_block_state, dst); - } - } -} - -Spec_Hash_Definitions_hash_alg -EverCrypt_Hash_Incremental_alg_of_state( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s -) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *s; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - return EverCrypt_Hash_alg_of_state(block_state); -} - -void -EverCrypt_Hash_Incremental_finish( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s, - uint8_t *dst -) -{ - Spec_Hash_Definitions_hash_alg a1 = EverCrypt_Hash_Incremental_alg_of_state(s); - switch (a1) - { - case Spec_Hash_Definitions_MD5: - { - EverCrypt_Hash_Incremental_finish_md5(s, dst); - break; - } - case Spec_Hash_Definitions_SHA1: - { - EverCrypt_Hash_Incremental_finish_sha1(s, dst); - break; - } - case Spec_Hash_Definitions_SHA2_224: - { - EverCrypt_Hash_Incremental_finish_sha224(s, dst); - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - EverCrypt_Hash_Incremental_finish_sha256(s, dst); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - EverCrypt_Hash_Incremental_finish_sha384(s, dst); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - EverCrypt_Hash_Incremental_finish_sha512(s, dst); - break; - } - case Spec_Hash_Definitions_SHA3_256: - { - EverCrypt_Hash_Incremental_finish_sha3_256(s, dst); - break; - } - case Spec_Hash_Definitions_Blake2S: - { - EverCrypt_Hash_Incremental_finish_blake2s(s, dst); - break; - } - case Spec_Hash_Definitions_Blake2B: - { - EverCrypt_Hash_Incremental_finish_blake2b(s, dst); 
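Each finish_* variant above copies the block state into a stack-allocated tmp_block_state and runs the padding and final compression on the copy, so extracting a digest does not consume the streaming state; callers may finish and then continue updating. A typical use of the incremental API, using only functions declared in this diff (part1/part1_len and part2/part2_len are placeholders):

    Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____
    *st = EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_SHA2_256);
    uint8_t digest[32U];
    /* update returns MaximumLengthExceeded past max_input_len64(alg) */
    if (EverCrypt_Hash_Incremental_update(st, part1, part1_len) == EverCrypt_Error_Success
        && EverCrypt_Hash_Incremental_update(st, part2, part2_len) == EverCrypt_Error_Success)
    {
      EverCrypt_Hash_Incremental_finish(st, digest); /* st remains usable */
    }
    EverCrypt_Hash_Incremental_free(st);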
- break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -void -EverCrypt_Hash_Incremental_free(Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s) -{ - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ scrut = *s; - uint8_t *buf = scrut.buf; - EverCrypt_Hash_state_s *block_state = scrut.block_state; - EverCrypt_Hash_free(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/EverCrypt_Hash.h b/dist/c89-compatible/EverCrypt_Hash.h deleted file mode 100644 index d06578bfcd..0000000000 --- a/dist/c89-compatible/EverCrypt_Hash.h +++ /dev/null @@ -1,299 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __EverCrypt_Hash_H -#define __EverCrypt_Hash_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Spec.h" -#include "Hacl_SHA3.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Hash_SHA2.h" -#include "Hacl_Hash_SHA1.h" -#include "Hacl_Hash_MD5.h" -#include "Hacl_Hash_Blake2s_128.h" -#include "Hacl_Hash_Blake2b_256.h" -#include "EverCrypt_Error.h" -#include "EverCrypt_AutoConfig2.h" -#include "evercrypt_targetconfig.h" -typedef Spec_Hash_Definitions_hash_alg EverCrypt_Hash_alg; - -C_String_t EverCrypt_Hash_string_of_alg(Spec_Hash_Definitions_hash_alg uu___); - -typedef Spec_Hash_Definitions_hash_alg EverCrypt_Hash_broken_alg; - -typedef Spec_Hash_Definitions_hash_alg EverCrypt_Hash_alg13; - -typedef void *EverCrypt_Hash_e_alg; - -typedef struct EverCrypt_Hash_state_s_s EverCrypt_Hash_state_s; - -bool -EverCrypt_Hash_uu___is_MD5_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_SHA1_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_SHA2_224_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_SHA2_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_SHA2_384_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_SHA2_512_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_SHA3_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_Blake2S_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_Blake2S_128_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_Blake2B_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -bool -EverCrypt_Hash_uu___is_Blake2B_256_s( - Spec_Hash_Definitions_hash_alg uu___, - EverCrypt_Hash_state_s projectee -); - -typedef EverCrypt_Hash_state_s *EverCrypt_Hash_state; - -Spec_Hash_Definitions_hash_alg EverCrypt_Hash_alg_of_state(EverCrypt_Hash_state_s *s); - -EverCrypt_Hash_state_s *EverCrypt_Hash_create_in(Spec_Hash_Definitions_hash_alg a); - -EverCrypt_Hash_state_s *EverCrypt_Hash_create(Spec_Hash_Definitions_hash_alg a); - -void EverCrypt_Hash_init(EverCrypt_Hash_state_s *s); - -void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n); - -void EverCrypt_Hash_update2(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *block); - -KRML_DEPRECATED("Use update2 instead") - -void EverCrypt_Hash_update(EverCrypt_Hash_state_s *s, uint8_t *block); - -void -EverCrypt_Hash_update_multi2( - EverCrypt_Hash_state_s *s, - uint64_t prevlen, - uint8_t *blocks, - uint32_t len -); - -KRML_DEPRECATED("Use update_multi2 instead") - -void EverCrypt_Hash_update_multi(EverCrypt_Hash_state_s *s, uint8_t *blocks, uint32_t len); - -void -EverCrypt_Hash_update_last_256( - uint32_t *s, - uint64_t input, - uint8_t *input_len, - uint32_t input_len1 -); - -void -EverCrypt_Hash_update_last2( - EverCrypt_Hash_state_s *s, - uint64_t prev_len, - uint8_t *last, - uint32_t last_len -); - -KRML_DEPRECATED("Use update_last2 
instead") - -void EverCrypt_Hash_update_last(EverCrypt_Hash_state_s *s, uint8_t *last, uint64_t total_len); - -void EverCrypt_Hash_finish(EverCrypt_Hash_state_s *s, uint8_t *dst); - -void EverCrypt_Hash_free(EverCrypt_Hash_state_s *s); - -void EverCrypt_Hash_copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst); - -void EverCrypt_Hash_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst); - -void EverCrypt_Hash_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst); - -void -EverCrypt_Hash_hash( - Spec_Hash_Definitions_hash_alg a, - uint8_t *dst, - uint8_t *input, - uint32_t len -); - -uint32_t EverCrypt_Hash_Incremental_hash_len(Spec_Hash_Definitions_hash_alg a); - -uint32_t EverCrypt_Hash_Incremental_block_len(Spec_Hash_Definitions_hash_alg a); - -typedef struct Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s_____s -{ - EverCrypt_Hash_state_s *block_state; - uint8_t *buf; - uint64_t total_len; -} -Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____; - -Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ -*EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_hash_alg a); - -void -EverCrypt_Hash_Incremental_init(Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s); - -uint64_t EverCrypt_Hash_Incremental_max_input_len64(Spec_Hash_Definitions_hash_alg a); - -EverCrypt_Error_error_code -EverCrypt_Hash_Incremental_update( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s, - uint8_t *data, - uint32_t len -); - -void -EverCrypt_Hash_Incremental_finish_md5( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_sha1( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_sha224( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_sha256( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_sha3_256( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_sha384( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_sha512( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_blake2s( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_finish_blake2b( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *p, - uint8_t *dst -); - -Spec_Hash_Definitions_hash_alg -EverCrypt_Hash_Incremental_alg_of_state( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s -); - -void -EverCrypt_Hash_Incremental_finish( - Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s, - uint8_t *dst -); - -void -EverCrypt_Hash_Incremental_free(Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ *s); - -typedef Hacl_Streaming_Functor_state_s___EverCrypt_Hash_state_s____ -*EverCrypt_Hash_Incremental_state; - -#if defined(__cplusplus) -} -#endif - -#define __EverCrypt_Hash_H_DEFINED -#endif diff --git a/dist/c89-compatible/EverCrypt_Poly1305.c b/dist/c89-compatible/EverCrypt_Poly1305.c deleted file mode 100644 index 0308d3ba79..0000000000 --- a/dist/c89-compatible/EverCrypt_Poly1305.c +++ /dev/null @@ -1,84 +0,0 @@ -/* MIT License - * - * 
Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "EverCrypt_Poly1305.h" - -#include "internal/Vale.h" - -static void poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key) -{ - uint8_t ctx[192U] = { 0U }; - uint32_t n_blocks; - uint32_t n_extra; - memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t)); - n_blocks = len / (uint32_t)16U; - n_extra = len % (uint32_t)16U; - { - uint8_t tmp[16U] = { 0U }; - if (n_extra == (uint32_t)0U) - { - uint64_t scrut = x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U); - } - else - { - uint32_t len16 = n_blocks * (uint32_t)16U; - uint8_t *src16 = src; - memcpy(tmp, src + len16, n_extra * sizeof (uint8_t)); - { - uint64_t scrut = x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U); - memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t)); - { - uint64_t scrut0 = x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U); - } - } - } - memcpy(dst, ctx, (uint32_t)16U * sizeof (uint8_t)); - } -} - -void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key) -{ - bool vec256 = EverCrypt_AutoConfig2_has_vec256(); - bool vec128 = EverCrypt_AutoConfig2_has_vec128(); - #if HACL_CAN_COMPILE_VEC256 - if (vec256) - { - Hacl_Poly1305_256_poly1305_mac(dst, len, src, key); - return; - } - #endif - #if HACL_CAN_COMPILE_VEC128 - if (vec128) - { - Hacl_Poly1305_128_poly1305_mac(dst, len, src, key); - return; - } - #endif - #if HACL_CAN_COMPILE_VALE - poly1305_vale(dst, src, len, key); - #else - Hacl_Poly1305_32_poly1305_mac(dst, len, src, key); - #endif -} - diff --git a/dist/c89-compatible/EverCrypt_Poly1305.h b/dist/c89-compatible/EverCrypt_Poly1305.h deleted file mode 100644 index 16b4d67352..0000000000 --- a/dist/c89-compatible/EverCrypt_Poly1305.h +++ /dev/null @@ -1,50 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in 
all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __EverCrypt_Poly1305_H -#define __EverCrypt_Poly1305_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Poly1305_32.h" -#include "Hacl_Poly1305_256.h" -#include "Hacl_Poly1305_128.h" -#include "EverCrypt_AutoConfig2.h" -#include "evercrypt_targetconfig.h" -void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key); - -#if defined(__cplusplus) -} -#endif - -#define __EverCrypt_Poly1305_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_AES128.h b/dist/c89-compatible/Hacl_AES128.h deleted file mode 100644 index 8833d00f84..0000000000 --- a/dist/c89-compatible/Hacl_AES128.h +++ /dev/null @@ -1,50 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#ifndef __Hacl_AES128_H -#define __Hacl_AES128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -extern void Hacl_AES128_aes128_key_expansion(uint8_t *key, uint8_t *expanded_key); - -extern void -Hacl_AES128_aes128_encrypt_block(uint8_t *cipher, uint8_t *plain, uint8_t *expanded_key); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_AES128_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum.c b/dist/c89-compatible/Hacl_Bignum.c deleted file mode 100644 index 9f1d34cda8..0000000000 --- a/dist/c89-compatible/Hacl_Bignum.c +++ /dev/null @@ -1,3291 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "internal/Hacl_Bignum.h" - - - -void -Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( - uint32_t aLen, - uint32_t *a, - uint32_t *b, - uint32_t *tmp, - uint32_t *res -) -{ - if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U) - { - Hacl_Bignum_Multiplication_bn_mul_u32(aLen, a, aLen, b, res); - return; - } - { - uint32_t len2 = aLen / (uint32_t)2U; - uint32_t *a0 = a; - uint32_t *a1 = a + len2; - uint32_t *b0 = b; - uint32_t *b1 = b + len2; - uint32_t *t0 = tmp; - uint32_t *t1 = tmp + len2; - uint32_t *tmp_ = tmp + aLen; - uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_); - uint32_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0); - uint32_t c00; - uint32_t c010; - uint32_t c11; - uint32_t c1; - uint32_t *t23; - uint32_t *tmp1; - uint32_t *r01; - uint32_t *r23; - uint32_t *r011; - uint32_t *r231; - uint32_t *t01; - uint32_t *t231; - uint32_t *t45; - uint32_t *t67; - uint32_t c2; - uint32_t c_sign; - uint32_t c3; - uint32_t c31; - uint32_t c4; - uint32_t c41; - uint32_t mask; - uint32_t c5; - uint32_t aLen2; - uint32_t *r0; - uint32_t r10; - uint32_t c9; - uint32_t c6; - uint32_t c7; - uint32_t *r; - uint32_t c01; - uint32_t r1; - uint32_t c8; - uint32_t c12; - uint32_t c13; - { - uint32_t i; - for (i = (uint32_t)0U; i < len2; i++) - { - uint32_t *os = t0; - uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]); - os[i] = x; - } - } - c00 = c0; - c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b0, b1, tmp_); - c11 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b1, b0, t1); - { - uint32_t i; - for (i = (uint32_t)0U; i < len2; i++) - { - uint32_t *os = t1; - uint32_t x = (((uint32_t)0U - c010) & t1[i]) | (~((uint32_t)0U - c010) & tmp_[i]); - os[i] = x; - } - } - c1 = c010; - t23 = tmp + aLen; - tmp1 = tmp + aLen + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, t0, t1, tmp1, t23); - r01 = res; - r23 = res + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, a0, b0, tmp1, r01); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len2, a1, b1, tmp1, r23); - r011 = res; - r231 = res + aLen; - t01 = tmp; - t231 = tmp + aLen; - t45 = tmp + (uint32_t)2U * aLen; - t67 = tmp + (uint32_t)3U * aLen; - c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01); - c_sign = c00 ^ c1; - c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t67); - c31 = c2 - c3; - c4 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, t01, t231, t45); - c41 = c2 + c4; - mask = (uint32_t)0U - c_sign; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen; i++) - { - uint32_t *os = t45; - uint32_t x = (mask & t45[i]) | (~mask & t67[i]); - os[i] = x; - } - } - c5 = (mask & c41) | (~mask & c31); - aLen2 = aLen / (uint32_t)2U; - r0 = res + aLen2; - r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0); - c9 = r10; - c6 = c9; - c7 = c5 + c6; - r = res + aLen + aLen2; - c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r); - if ((uint32_t)1U < aLen + aLen - (aLen + aLen2)) - { - uint32_t *a11 = r + (uint32_t)1U; - uint32_t *res1 = r + (uint32_t)1U; - uint32_t c = c01; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U; - i++) - { - uint32_t t11 = a11[(uint32_t)4U * i]; - uint32_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i0); - { - uint32_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = 
Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, (uint32_t)0U, res_i1); - { - uint32_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, (uint32_t)0U, res_i2); - { - uint32_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, (uint32_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < aLen + aLen - (aLen + aLen2) - (uint32_t)1U; - i++) - { - uint32_t t11 = a11[i]; - uint32_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i); - } - } - { - uint32_t c110 = c; - r1 = c110; - } - } - else - { - r1 = c01; - } - c8 = r1; - c12 = c8; - c13 = c12; - } -} - -void -Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( - uint32_t aLen, - uint64_t *a, - uint64_t *b, - uint64_t *tmp, - uint64_t *res -) -{ - if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U) - { - Hacl_Bignum_Multiplication_bn_mul_u64(aLen, a, aLen, b, res); - return; - } - { - uint32_t len2 = aLen / (uint32_t)2U; - uint64_t *a0 = a; - uint64_t *a1 = a + len2; - uint64_t *b0 = b; - uint64_t *b1 = b + len2; - uint64_t *t0 = tmp; - uint64_t *t1 = tmp + len2; - uint64_t *tmp_ = tmp + aLen; - uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_); - uint64_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0); - uint64_t c00; - uint64_t c010; - uint64_t c11; - uint64_t c1; - uint64_t *t23; - uint64_t *tmp1; - uint64_t *r01; - uint64_t *r23; - uint64_t *r011; - uint64_t *r231; - uint64_t *t01; - uint64_t *t231; - uint64_t *t45; - uint64_t *t67; - uint64_t c2; - uint64_t c_sign; - uint64_t c3; - uint64_t c31; - uint64_t c4; - uint64_t c41; - uint64_t mask; - uint64_t c5; - uint32_t aLen2; - uint64_t *r0; - uint64_t r10; - uint64_t c9; - uint64_t c6; - uint64_t c7; - uint64_t *r; - uint64_t c01; - uint64_t r1; - uint64_t c8; - uint64_t c12; - uint64_t c13; - { - uint32_t i; - for (i = (uint32_t)0U; i < len2; i++) - { - uint64_t *os = t0; - uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]); - os[i] = x; - } - } - c00 = c0; - c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b0, b1, tmp_); - c11 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b1, b0, t1); - { - uint32_t i; - for (i = (uint32_t)0U; i < len2; i++) - { - uint64_t *os = t1; - uint64_t x = (((uint64_t)0U - c010) & t1[i]) | (~((uint64_t)0U - c010) & tmp_[i]); - os[i] = x; - } - } - c1 = c010; - t23 = tmp + aLen; - tmp1 = tmp + aLen + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, t0, t1, tmp1, t23); - r01 = res; - r23 = res + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, a0, b0, tmp1, r01); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len2, a1, b1, tmp1, r23); - r011 = res; - r231 = res + aLen; - t01 = tmp; - t231 = tmp + aLen; - t45 = tmp + (uint32_t)2U * aLen; - t67 = tmp + (uint32_t)3U * aLen; - c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01); - c_sign = c00 ^ c1; - c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t67); - c31 = c2 - c3; - c4 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, t01, t231, t45); - c41 = c2 + c4; - mask = (uint64_t)0U - c_sign; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen; i++) - { - uint64_t *os = t45; - uint64_t x = (mask & t45[i]) | (~mask & t67[i]); - os[i] = x; - } - } - 
c5 = (mask & c41) | (~mask & c31); - aLen2 = aLen / (uint32_t)2U; - r0 = res + aLen2; - r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0); - c9 = r10; - c6 = c9; - c7 = c5 + c6; - r = res + aLen + aLen2; - c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r); - if ((uint32_t)1U < aLen + aLen - (aLen + aLen2)) - { - uint64_t *a11 = r + (uint32_t)1U; - uint64_t *res1 = r + (uint32_t)1U; - uint64_t c = c01; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U; - i++) - { - uint64_t t11 = a11[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i0); - { - uint64_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, (uint64_t)0U, res_i1); - { - uint64_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, (uint64_t)0U, res_i2); - { - uint64_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, (uint64_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < aLen + aLen - (aLen + aLen2) - (uint32_t)1U; - i++) - { - uint64_t t11 = a11[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i); - } - } - { - uint64_t c110 = c; - r1 = c110; - } - } - else - { - r1 = c01; - } - c8 = r1; - c12 = c8; - c13 = c12; - } -} - -void -Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( - uint32_t aLen, - uint32_t *a, - uint32_t *tmp, - uint32_t *res -) -{ - if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U) - { - Hacl_Bignum_Multiplication_bn_sqr_u32(aLen, a, res); - return; - } - { - uint32_t len2 = aLen / (uint32_t)2U; - uint32_t *a0 = a; - uint32_t *a1 = a + len2; - uint32_t *t0 = tmp; - uint32_t *tmp_ = tmp + aLen; - uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_); - uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0); - uint32_t c00; - uint32_t *t23; - uint32_t *tmp1; - uint32_t *r01; - uint32_t *r23; - uint32_t *r011; - uint32_t *r231; - uint32_t *t01; - uint32_t *t231; - uint32_t *t45; - uint32_t c2; - uint32_t c3; - uint32_t c5; - uint32_t aLen2; - uint32_t *r0; - uint32_t r10; - uint32_t c4; - uint32_t c6; - uint32_t c7; - uint32_t *r; - uint32_t c01; - uint32_t r1; - uint32_t c8; - uint32_t c9; - uint32_t c10; - { - uint32_t i; - for (i = (uint32_t)0U; i < len2; i++) - { - uint32_t *os = t0; - uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]); - os[i] = x; - } - } - c00 = c0; - t23 = tmp + aLen; - tmp1 = tmp + aLen + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, t0, tmp1, t23); - r01 = res; - r23 = res + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, a0, tmp1, r01); - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, a1, tmp1, r23); - r011 = res; - r231 = res + aLen; - t01 = tmp; - t231 = tmp + aLen; - t45 = tmp + (uint32_t)2U * aLen; - c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01); - c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t45); - c5 = c2 - c3; - aLen2 = aLen / (uint32_t)2U; - r0 = res + aLen2; - r10 = 
Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0); - c4 = r10; - c6 = c4; - c7 = c5 + c6; - r = res + aLen + aLen2; - c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r); - if ((uint32_t)1U < aLen + aLen - (aLen + aLen2)) - { - uint32_t *a11 = r + (uint32_t)1U; - uint32_t *res1 = r + (uint32_t)1U; - uint32_t c = c01; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U; - i++) - { - uint32_t t1 = a11[(uint32_t)4U * i]; - uint32_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i0); - { - uint32_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, (uint32_t)0U, res_i1); - { - uint32_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i2); - { - uint32_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, (uint32_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < aLen + aLen - (aLen + aLen2) - (uint32_t)1U; - i++) - { - uint32_t t1 = a11[i]; - uint32_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i); - } - } - { - uint32_t c11 = c; - r1 = c11; - } - } - else - { - r1 = c01; - } - c8 = r1; - c9 = c8; - c10 = c9; - } -} - -void -Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( - uint32_t aLen, - uint64_t *a, - uint64_t *tmp, - uint64_t *res -) -{ - if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U) - { - Hacl_Bignum_Multiplication_bn_sqr_u64(aLen, a, res); - return; - } - { - uint32_t len2 = aLen / (uint32_t)2U; - uint64_t *a0 = a; - uint64_t *a1 = a + len2; - uint64_t *t0 = tmp; - uint64_t *tmp_ = tmp + aLen; - uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_); - uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0); - uint64_t c00; - uint64_t *t23; - uint64_t *tmp1; - uint64_t *r01; - uint64_t *r23; - uint64_t *r011; - uint64_t *r231; - uint64_t *t01; - uint64_t *t231; - uint64_t *t45; - uint64_t c2; - uint64_t c3; - uint64_t c5; - uint32_t aLen2; - uint64_t *r0; - uint64_t r10; - uint64_t c4; - uint64_t c6; - uint64_t c7; - uint64_t *r; - uint64_t c01; - uint64_t r1; - uint64_t c8; - uint64_t c9; - uint64_t c10; - { - uint32_t i; - for (i = (uint32_t)0U; i < len2; i++) - { - uint64_t *os = t0; - uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]); - os[i] = x; - } - } - c00 = c0; - t23 = tmp + aLen; - tmp1 = tmp + aLen + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, t0, tmp1, t23); - r01 = res; - r23 = res + aLen; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, a0, tmp1, r01); - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, a1, tmp1, r23); - r011 = res; - r231 = res + aLen; - t01 = tmp; - t231 = tmp + aLen; - t45 = tmp + (uint32_t)2U * aLen; - c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01); - c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t45); - c5 = c2 - c3; - aLen2 = aLen / (uint32_t)2U; - r0 = res + aLen2; - r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0); - c4 = r10; - c6 = c4; - c7 = c5 + c6; - r = res + aLen + aLen2; - 
c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r); - if ((uint32_t)1U < aLen + aLen - (aLen + aLen2)) - { - uint64_t *a11 = r + (uint32_t)1U; - uint64_t *res1 = r + (uint32_t)1U; - uint64_t c = c01; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U; - i++) - { - uint64_t t1 = a11[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0); - { - uint64_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1); - { - uint64_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2); - { - uint64_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < aLen + aLen - (aLen + aLen2) - (uint32_t)1U; - i++) - { - uint64_t t1 = a11[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i); - } - } - { - uint64_t c11 = c; - r1 = c11; - } - } - else - { - r1 = c01; - } - c8 = r1; - c9 = c8; - c10 = c9; - } -} - -void -Hacl_Bignum_bn_add_mod_n_u32( - uint32_t len1, - uint32_t *n, - uint32_t *a, - uint32_t *b, - uint32_t *res -) -{ - uint32_t c2 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t1, t20, res_i0); - { - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t10, t21, res_i1); - { - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t11, t22, res_i2); - { - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint32_t t1 = a[i]; - uint32_t t2 = b[i]; - uint32_t *res_i = res + i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t1, t2, res_i); - } - } - c0 = c2; - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t tmp[len1]; - memset(tmp, 0U, len1 * sizeof (uint32_t)); - { - uint32_t c3 = (uint32_t)0U; - uint32_t c1; - uint32_t c; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t1, t20, res_i0); - { - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - 
c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t10, t21, res_i1); - { - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t11, t22, res_i2); - { - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint32_t t1 = res[i]; - uint32_t t2 = n[i]; - uint32_t *res_i = tmp + i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t1, t2, res_i); - } - } - c1 = c3; - c = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint32_t *os = res; - uint32_t x = (c & res[i]) | (~c & tmp[i]); - os[i] = x; - } - } - } - } -} - -void -Hacl_Bignum_bn_add_mod_n_u64( - uint32_t len1, - uint64_t *n, - uint64_t *a, - uint64_t *b, - uint64_t *res -) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint64_t t1 = a[i]; - uint64_t t2 = b[i]; - uint64_t *res_i = res + i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t1, t2, res_i); - } - } - c0 = c2; - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t tmp[len1]; - memset(tmp, 0U, len1 * sizeof (uint64_t)); - { - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint64_t t1 = res[(uint32_t)4U * i]; - uint64_t t20 = n[(uint32_t)4U * i]; - uint64_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t12, t2, res_i); - 
} - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint64_t t1 = res[i]; - uint64_t t2 = n[i]; - uint64_t *res_i = tmp + i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t1, t2, res_i); - } - } - c1 = c3; - c = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint64_t *os = res; - uint64_t x = (c & res[i]) | (~c & tmp[i]); - os[i] = x; - } - } - } - } -} - -void -Hacl_Bignum_bn_sub_mod_n_u32( - uint32_t len1, - uint32_t *n, - uint32_t *a, - uint32_t *b, - uint32_t *res -) -{ - uint32_t c2 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, t20, res_i0); - { - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t10, t21, res_i1); - { - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t11, t22, res_i2); - { - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint32_t t1 = a[i]; - uint32_t t2 = b[i]; - uint32_t *res_i = res + i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, t2, res_i); - } - } - c0 = c2; - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t tmp[len1]; - memset(tmp, 0U, len1 * sizeof (uint32_t)); - { - uint32_t c3 = (uint32_t)0U; - uint32_t c1; - uint32_t c; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t1, t20, res_i0); - { - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t10, t21, res_i1); - { - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t11, t22, res_i2); - { - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint32_t t1 = res[i]; - uint32_t t2 = n[i]; - uint32_t *res_i = tmp + i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t1, t2, res_i); - } - } - c1 = c3; - c = (uint32_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint32_t *os = res; - uint32_t x = (c & tmp[i]) | (~c & res[i]); - os[i] = x; - } - } - } - } -} - -void -Hacl_Bignum_bn_sub_mod_n_u64( - uint32_t len1, - uint64_t *n, - uint64_t *a, - uint64_t 
*b, - uint64_t *res -) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint64_t t1 = a[i]; - uint64_t t2 = b[i]; - uint64_t *res_i = res + i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, t2, res_i); - } - } - c0 = c2; - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t tmp[len1]; - memset(tmp, 0U, len1 * sizeof (uint64_t)); - { - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++) - { - uint64_t t1 = res[(uint32_t)4U * i]; - uint64_t t20 = n[(uint32_t)4U * i]; - uint64_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++) - { - uint64_t t1 = res[i]; - uint64_t t2 = n[i]; - uint64_t *res_i = tmp + i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, t2, res_i); - } - } - c1 = c3; - c = (uint64_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint64_t *os = res; - uint64_t x = (c & tmp[i]) | (~c & res[i]); - os[i] = x; - } - } - } - } -} - -uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0) -{ - uint32_t alpha = (uint32_t)2147483648U; - uint32_t beta = n0; - uint32_t ub = (uint32_t)0U; - uint32_t vb = (uint32_t)0U; - ub = (uint32_t)1U; - vb = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t us = ub; - uint32_t vs = vb; - uint32_t u_is_odd = (uint32_t)0U - (us & (uint32_t)1U); - uint32_t beta_if_u_is_odd = beta & u_is_odd; - ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd); - { - uint32_t alpha_if_u_is_odd = alpha & u_is_odd; - vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd; - } - } 
- } - return vb; -} - -uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0) -{ - uint64_t alpha = (uint64_t)9223372036854775808U; - uint64_t beta = n0; - uint64_t ub = (uint64_t)0U; - uint64_t vb = (uint64_t)0U; - ub = (uint64_t)1U; - vb = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t us = ub; - uint64_t vs = vb; - uint64_t u_is_odd = (uint64_t)0U - (us & (uint64_t)1U); - uint64_t beta_if_u_is_odd = beta & u_is_odd; - ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd); - { - uint64_t alpha_if_u_is_odd = alpha & u_is_odd; - vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd; - } - } - } - return vb; -} - -uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t one[len]; - memset(one, 0U, len * sizeof (uint32_t)); - { - uint32_t bit0; - uint32_t m0; - memset(one, 0U, len * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m0 = (uint32_t)0U - bit0; - { - uint32_t acc = (uint32_t)0U; - uint32_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m1 = acc; - return m0 & m1; - } - } - } -} - -void -Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32( - uint32_t len, - uint32_t nBits, - uint32_t *n, - uint32_t *res -) -{ - uint32_t i0; - uint32_t j; - uint32_t i; - memset(res, 0U, len * sizeof (uint32_t)); - i0 = nBits / (uint32_t)32U; - j = nBits % (uint32_t)32U; - res[i0] = res[i0] | (uint32_t)1U << j; - for (i = (uint32_t)0U; i < (uint32_t)64U * len - nBits; i++) - { - Hacl_Bignum_bn_add_mod_n_u32(len, n, res, res, res); - } -} - -void -Hacl_Bignum_Montgomery_bn_mont_reduction_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv, - uint32_t *c, - uint32_t *res -) -{ - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < len; i0++) - { - uint32_t qj = nInv * c[i0]; - uint32_t *res_j0 = c + i0; - uint32_t c1 = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); - { - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); - { - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); - { - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint32_t a_i = n[i]; - uint32_t *res_i = res_j0 + i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i); - } - } - { - uint32_t r = c1; - uint32_t c10 = r; - uint32_t *resb = c + len + i0; - uint32_t res_j = c[len + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + len, (len + len - len) * sizeof (uint32_t)); - c0 = c00; - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t tmp[len]; - memset(tmp, 0U, len 
* sizeof (uint32_t)); - { - uint32_t c10 = (uint32_t)0U; - uint32_t c1; - uint32_t c2; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t1, t20, res_i0); - { - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t10, t21, res_i1); - { - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t11, t22, res_i2); - { - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint32_t t1 = res[i]; - uint32_t t2 = n[i]; - uint32_t *res_i = tmp + i; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t1, t2, res_i); - } - } - c1 = c10; - c2 = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t *os = res; - uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); - os[i] = x; - } - } - } - } -} - -void -Hacl_Bignum_Montgomery_bn_to_mont_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv, - uint32_t *r2, - uint32_t *a, - uint32_t *aM -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv, c, aM); - } - } -} - -void -Hacl_Bignum_Montgomery_bn_from_mont_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *a -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t tmp[len + len]; - memset(tmp, 0U, (len + len) * sizeof (uint32_t)); - memcpy(tmp, aM, len * sizeof (uint32_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, tmp, a); - } -} - -void -Hacl_Bignum_Montgomery_bn_mont_mul_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *bM, - uint32_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM); - } - } -} - -void -Hacl_Bignum_Montgomery_bn_mont_sqr_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - 
Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM); - } - } -} - -uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t one[len]; - memset(one, 0U, len * sizeof (uint64_t)); - { - uint64_t bit0; - uint64_t m0; - memset(one, 0U, len * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m0 = (uint64_t)0U - bit0; - { - uint64_t acc = (uint64_t)0U; - uint64_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m1 = acc; - return m0 & m1; - } - } - } -} - -void -Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64( - uint32_t len, - uint32_t nBits, - uint64_t *n, - uint64_t *res -) -{ - uint32_t i0; - uint32_t j; - uint32_t i; - memset(res, 0U, len * sizeof (uint64_t)); - i0 = nBits / (uint32_t)64U; - j = nBits % (uint32_t)64U; - res[i0] = res[i0] | (uint64_t)1U << j; - for (i = (uint32_t)0U; i < (uint32_t)128U * len - nBits; i++) - { - Hacl_Bignum_bn_add_mod_n_u64(len, n, res, res, res); - } -} - -void -Hacl_Bignum_Montgomery_bn_mont_reduction_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv, - uint64_t *c, - uint64_t *res -) -{ - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < len; i0++) - { - uint64_t qj = nInv * c[i0]; - uint64_t *res_j0 = c + i0; - uint64_t c1 = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint64_t a_i = n[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0); - { - uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1); - { - uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2); - { - uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint64_t a_i = n[i]; - uint64_t *res_i = res_j0 + i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i); - } - } - { - uint64_t r = c1; - uint64_t c10 = r; - uint64_t *resb = c + len + i0; - uint64_t res_j = c[len + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + len, (len + len - len) * sizeof (uint64_t)); - c0 = c00; - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint64_t)); - { - uint64_t c10 = (uint64_t)0U; - uint64_t c1; - uint64_t c2; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint64_t t1 = res[(uint32_t)4U * i]; - uint64_t t20 = n[(uint32_t)4U * i]; - uint64_t *res_i0 = tmp + (uint32_t)4U * i; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * i + 
(uint32_t)1U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint64_t t1 = res[i]; - uint64_t t2 = n[i]; - uint64_t *res_i = tmp + i; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t1, t2, res_i); - } - } - c1 = c10; - c2 = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t *os = res; - uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); - os[i] = x; - } - } - } - } -} - -void -Hacl_Bignum_Montgomery_bn_to_mont_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv, - uint64_t *r2, - uint64_t *a, - uint64_t *aM -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv, c, aM); - } - } -} - -void -Hacl_Bignum_Montgomery_bn_from_mont_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *a -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t tmp[len + len]; - memset(tmp, 0U, (len + len) * sizeof (uint64_t)); - memcpy(tmp, aM, len * sizeof (uint64_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, tmp, a); - } -} - -void -Hacl_Bignum_Montgomery_bn_mont_mul_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *bM, - uint64_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM); - } - } -} - -void -Hacl_Bignum_Montgomery_bn_mont_sqr_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM); - } - } -} - -static void -bn_almost_mont_reduction_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv, - uint32_t *c, - uint32_t *res -) -{ - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < len; i0++) - { - uint32_t qj = nInv * c[i0]; - uint32_t *res_j0 = c + i0; - uint32_t c1 = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / 
(uint32_t)4U; i++) - { - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); - { - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); - { - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); - { - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint32_t a_i = n[i]; - uint32_t *res_i = res_j0 + i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i); - } - } - { - uint32_t r = c1; - uint32_t c10 = r; - uint32_t *resb = c + len + i0; - uint32_t res_j = c[len + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + len, (len + len - len) * sizeof (uint32_t)); - c0 = c00; - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint32_t)); - { - uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, res, n, tmp); - uint32_t m = (uint32_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t *os = res; - uint32_t x = (m & tmp[i]) | (~m & res[i]); - os[i] = x; - } - } - } - } -} - -static void -bn_almost_mont_mul_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *bM, - uint32_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c); - bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM); - } - } -} - -static void -bn_almost_mont_sqr_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c); - bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM); - } - } -} - -static void -bn_almost_mont_reduction_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv, - uint64_t *c, - uint64_t *res -) -{ - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < len; i0++) - { - uint64_t qj = nInv * c[i0]; - uint64_t *res_j0 = c + i0; - uint64_t c1 = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint64_t a_i = n[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0); - { - uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1); - { - uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = 
res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2); - { - uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint64_t a_i = n[i]; - uint64_t *res_i = res_j0 + i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i); - } - } - { - uint64_t r = c1; - uint64_t c10 = r; - uint64_t *resb = c + len + i0; - uint64_t res_j = c[len + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + len, (len + len - len) * sizeof (uint64_t)); - c0 = c00; - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint64_t)); - { - uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, res, n, tmp); - uint64_t m = (uint64_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t *os = res; - uint64_t x = (m & tmp[i]) | (~m & res[i]); - os[i] = x; - } - } - } - } -} - -static void -bn_almost_mont_mul_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *bM, - uint64_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c); - bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM); - } - } -} - -static void -bn_almost_mont_sqr_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *resM -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c); - bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM); - } - } -} - -uint32_t -Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( - uint32_t len, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t one[len]; - memset(one, 0U, len * sizeof (uint32_t)); - { - uint32_t bit0; - uint32_t m00; - memset(one, 0U, len * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m00 = (uint32_t)0U - bit0; - { - uint32_t acc0 = (uint32_t)0U; - uint32_t m10; - uint32_t m0; - uint32_t bLen; - uint32_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - if (bBits < (uint32_t)32U * bLen) - { - KRML_CHECK_SIZE(sizeof (uint32_t), bLen); - { - uint32_t b2[bLen]; - memset(b2, 0U, bLen * sizeof (uint32_t)); - { - uint32_t i0 = bBits / (uint32_t)32U; - uint32_t j = bBits % (uint32_t)32U; - b2[i0] = b2[i0] | (uint32_t)1U << j; - { - uint32_t acc = 
(uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - { - uint32_t res = acc; - m1 = res; - } - } - } - } - } - else - { - m1 = (uint32_t)0xFFFFFFFFU; - } - { - uint32_t acc = (uint32_t)0U; - uint32_t m2; - uint32_t m; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m2 = acc; - m = m1 & m2; - return m0 & m; - } - } - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( - uint32_t len, - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t aM[len]; - memset(aM, 0U, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t resM[len]; - memset(resM, 0U, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint32_t)); - { - uint32_t *ctx_n; - uint32_t *ctx_r2; - memcpy(ctx, n, len * sizeof (uint32_t)); - memcpy(ctx + len, r2, len * sizeof (uint32_t)); - ctx_n = ctx; - ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)32U; - uint32_t j = i % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - if (!(bit == (uint32_t)0U)) - { - uint32_t *ctx_n0 = ctx; - bn_almost_mont_mul_u32(len, ctx_n0, mu, resM, aM, resM); - } - { - uint32_t *ctx_n0 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n0, mu, aM, aM); - } - } - } - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t tmp[len + len]; - memset(tmp, 0U, (len + len) * sizeof (uint32_t)); - memcpy(tmp, resM, len * sizeof (uint32_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp, res); - return; - } - } - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t aM[len]; - memset(aM, 0U, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t resM[len]; - memset(resM, 0U, len * sizeof (uint32_t)); - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - 
KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint32_t)); - memcpy(ctx, n, len * sizeof (uint32_t)); - memcpy(ctx + len, r2, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len); - { - uint32_t table[(uint32_t)16U * len]; - memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint32_t)); - { - uint32_t *t0 = table; - uint32_t *t1 = table + len; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, len * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * len; - uint32_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, - tmp, - len * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len; - uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, - tmp, - len * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i] >> j; - uint32_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - uint32_t bits_l32 = bits_c; - uint32_t *a_bits_l = table + bits_l32 * len; - memcpy(resM, a_bits_l, len * sizeof (uint32_t)); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t a_bits_l[len]; - memset(a_bits_l, 0U, len * sizeof (uint32_t)); - { - uint32_t bits_l32 = bits_l; - uint32_t *a_bits_l1 = table + bits_l32 * len; - memcpy(a_bits_l, a_bits_l1, len * sizeof (uint32_t)); - { - uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t tmp1[len + len]; - memset(tmp1, 0U, (len + len) * sizeof (uint32_t)); - memcpy(tmp1, resM, len * sizeof (uint32_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp1, res); - } - } - } - } - } - } - } - } - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( - uint32_t len, - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - 
if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t aM[len]; - memset(aM, 0U, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t resM[len]; - memset(resM, 0U, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint32_t)); - memcpy(ctx, n, len * sizeof (uint32_t)); - memcpy(ctx + len, r2, len * sizeof (uint32_t)); - { - uint32_t sw = (uint32_t)0U; - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + len; - uint32_t sw0; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - uint32_t sw1 = bit ^ sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - { - uint32_t *ctx_n0 = ctx; - bn_almost_mont_mul_u32(len, ctx_n0, mu, aM, resM, aM); - { - uint32_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n1, mu, resM, resM); - sw = bit; - } - } - } - } - sw0 = sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t tmp[len + len]; - memset(tmp, 0U, (len + len) * sizeof (uint32_t)); - memcpy(tmp, resM, len * sizeof (uint32_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp, res); - return; - } - } - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t aM[len]; - memset(aM, 0U, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c0[len + len]; - memset(c0, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c0); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c0, aM); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t resM[len]; - memset(resM, 0U, len * sizeof (uint32_t)); - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint32_t)); - memcpy(ctx, n, len * sizeof (uint32_t)); - memcpy(ctx + len, r2, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len); - { - uint32_t table[(uint32_t)16U * len]; - memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint32_t)); - { - 
uint32_t *t0 = table; - uint32_t *t1 = table + len; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, len * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * len; - uint32_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, - tmp, - len * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len; - uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, - tmp, - len * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i0] >> j; - uint32_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - memcpy(resM, table + (uint32_t)0U * len, len * sizeof (uint32_t)); - KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U); - uint32_t *res_j = table + (i1 + (uint32_t)1U) * len; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t *os = resM; - uint32_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x; - } - }); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t a_bits_l[len]; - memset(a_bits_l, 0U, len * sizeof (uint32_t)); - memcpy(a_bits_l, table + (uint32_t)0U * len, len * sizeof (uint32_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_l, i2 + (uint32_t)1U); - uint32_t *res_j = table + (i2 + (uint32_t)1U) * len; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t *os = a_bits_l; - uint32_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x; - } - }); - { - uint32_t *ctx_n = ctx; - bn_almost_mont_mul_u32(len, ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t tmp1[len + len]; - memset(tmp1, 0U, (len + len) * sizeof (uint32_t)); - memcpy(tmp1, resM, len * sizeof (uint32_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, tmp1, res); - } - } - } - } - } - } - } - } - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32( - uint32_t len, - uint32_t nBits, - uint32_t *n, - uint32_t *a, - 
uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t r2[len]; - memset(r2, 0U, len * sizeof (uint32_t)); - { - uint32_t mu; - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len, n, mu, r2, a, bBits, b, res); - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32( - uint32_t len, - uint32_t nBits, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t r2[len]; - memset(r2, 0U, len * sizeof (uint32_t)); - { - uint32_t mu; - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(len, n, mu, r2, a, bBits, b, res); - } - } -} - -uint64_t -Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( - uint32_t len, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t one[len]; - memset(one, 0U, len * sizeof (uint64_t)); - { - uint64_t bit0; - uint64_t m00; - memset(one, 0U, len * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m00 = (uint64_t)0U - bit0; - { - uint64_t acc0 = (uint64_t)0U; - uint64_t m10; - uint64_t m0; - uint32_t bLen; - uint64_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = - (beq & acc0) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - if (bBits < (uint32_t)64U * bLen) - { - KRML_CHECK_SIZE(sizeof (uint64_t), bLen); - { - uint64_t b2[bLen]; - memset(b2, 0U, bLen * sizeof (uint64_t)); - { - uint32_t i0 = bBits / (uint32_t)64U; - uint32_t j = bBits % (uint32_t)64U; - b2[i0] = b2[i0] | (uint64_t)1U << j; - { - uint64_t acc = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - { - uint64_t res = acc; - m1 = res; - } - } - } - } - } - else - { - m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - { - uint64_t acc = (uint64_t)0U; - uint64_t m2; - uint64_t m; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m2 = acc; - m = m1 & m2; - return m0 & m; - } - } - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( - uint32_t len, - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t aM[len]; - memset(aM, 0U, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - 
uint64_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t resM[len]; - memset(resM, 0U, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint64_t)); - { - uint64_t *ctx_n; - uint64_t *ctx_r2; - memcpy(ctx, n, len * sizeof (uint64_t)); - memcpy(ctx + len, r2, len * sizeof (uint64_t)); - ctx_n = ctx; - ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)64U; - uint32_t j = i % (uint32_t)64U; - uint64_t tmp = b[i1]; - uint64_t bit = tmp >> j & (uint64_t)1U; - if (!(bit == (uint64_t)0U)) - { - uint64_t *ctx_n0 = ctx; - bn_almost_mont_mul_u64(len, ctx_n0, mu, resM, aM, resM); - } - { - uint64_t *ctx_n0 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n0, mu, aM, aM); - } - } - } - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t tmp[len + len]; - memset(tmp, 0U, (len + len) * sizeof (uint64_t)); - memcpy(tmp, resM, len * sizeof (uint64_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp, res); - return; - } - } - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t aM[len]; - memset(aM, 0U, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t resM[len]; - memset(resM, 0U, len * sizeof (uint64_t)); - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint64_t)); - memcpy(ctx, n, len * sizeof (uint64_t)); - memcpy(ctx + len, r2, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len); - { - uint64_t table[(uint32_t)16U * len]; - memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint64_t)); - { - uint64_t *t0 = table; - uint64_t *t1 = table + len; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, len * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * len; - uint64_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, - tmp, - len * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len; - uint64_t *ctx_n = ctx; - bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, - tmp, - len * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - 
uint64_t mask_l = (uint64_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i] >> j; - uint64_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = ite & mask_l; - uint32_t bits_l32 = (uint32_t)bits_c; - uint64_t *a_bits_l = table + bits_l32 * len; - memcpy(resM, a_bits_l, len * sizeof (uint64_t)); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t a_bits_l[len]; - memset(a_bits_l, 0U, len * sizeof (uint64_t)); - { - uint32_t bits_l32 = (uint32_t)bits_l; - uint64_t *a_bits_l1 = table + bits_l32 * len; - memcpy(a_bits_l, a_bits_l1, len * sizeof (uint64_t)); - { - uint64_t *ctx_n = ctx; - bn_almost_mont_mul_u64(len, ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t tmp1[len + len]; - memset(tmp1, 0U, (len + len) * sizeof (uint64_t)); - memcpy(tmp1, resM, len * sizeof (uint64_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp1, res); - } - } - } - } - } - } - } - } - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( - uint32_t len, - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t aM[len]; - memset(aM, 0U, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t resM[len]; - memset(resM, 0U, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint64_t)); - memcpy(ctx, n, len * sizeof (uint64_t)); - memcpy(ctx + len, r2, len * sizeof (uint64_t)); - { - uint64_t sw = (uint64_t)0U; - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + len; - uint64_t sw0; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U; - uint64_t tmp = b[i1]; 
- uint64_t bit = tmp >> j & (uint64_t)1U; - uint64_t sw1 = bit ^ sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - { - uint64_t *ctx_n0 = ctx; - bn_almost_mont_mul_u64(len, ctx_n0, mu, aM, resM, aM); - { - uint64_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n1, mu, resM, resM); - sw = bit; - } - } - } - } - sw0 = sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t tmp[len + len]; - memset(tmp, 0U, (len + len) * sizeof (uint64_t)); - memcpy(tmp, resM, len * sizeof (uint64_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp, res); - return; - } - } - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t aM[len]; - memset(aM, 0U, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c0[len + len]; - memset(c0, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp0[(uint32_t)4U * len]; - memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c0); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c0, aM); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t resM[len]; - memset(resM, 0U, len * sizeof (uint64_t)); - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t ctx[len + len]; - memset(ctx, 0U, (len + len) * sizeof (uint64_t)); - memcpy(ctx, n, len * sizeof (uint64_t)); - memcpy(ctx + len, r2, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len); - { - uint64_t table[(uint32_t)16U * len]; - memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint64_t)); - { - uint64_t *t0 = table; - uint64_t *t1 = table + len; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, len * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * len; - uint64_t *ctx_n1 = ctx; - bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, - tmp, - len * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len; - uint64_t *ctx_n = ctx; - bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, - tmp, - len * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint64_t mask_l = (uint64_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i0] >> j; - uint64_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = ite & mask_l; - memcpy(resM, table + (uint32_t)0U * len, len * sizeof (uint64_t)); - 
KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U)); - uint64_t *res_j = table + (i1 + (uint32_t)1U) * len; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t *os = resM; - uint64_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x; - } - }); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + len; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t a_bits_l[len]; - memset(a_bits_l, 0U, len * sizeof (uint64_t)); - memcpy(a_bits_l, table + (uint32_t)0U * len, len * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t - c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i2 + (uint32_t)1U)); - uint64_t *res_j = table + (i2 + (uint32_t)1U) * len; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t *os = a_bits_l; - uint64_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x; - } - }); - { - uint64_t *ctx_n = ctx; - bn_almost_mont_mul_u64(len, ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - } - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t tmp1[len + len]; - memset(tmp1, 0U, (len + len) * sizeof (uint64_t)); - memcpy(tmp1, resM, len * sizeof (uint64_t)); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, tmp1, res); - } - } - } - } - } - } - } - } - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64( - uint32_t len, - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t r2[len]; - memset(r2, 0U, len * sizeof (uint64_t)); - { - uint64_t mu; - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len, n, mu, r2, a, bBits, b, res); - } - } -} - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64( - uint32_t len, - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t r2[len]; - memset(r2, 0U, len * sizeof (uint64_t)); - { - uint64_t mu; - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(len, n, mu, r2, a, bBits, b, res); - } - } -} - diff --git a/dist/c89-compatible/Hacl_Bignum.h b/dist/c89-compatible/Hacl_Bignum.h deleted file mode 100644 index 8f49d59fd4..0000000000 --- a/dist/c89-compatible/Hacl_Bignum.h +++ /dev/null @@ -1,64 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * 
Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum_H -#define __Hacl_Bignum_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Bignum_Base.h" -#include "evercrypt_targetconfig.h" -#include "lib_intrinsics.h" -typedef struct Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32_s -{ - uint32_t len; - uint32_t *n; - uint32_t mu; - uint32_t *r2; -} -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32; - -typedef struct Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64_s -{ - uint32_t len; - uint64_t *n; - uint64_t mu; - uint64_t *r2; -} -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64; - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum25519_51.h b/dist/c89-compatible/Hacl_Bignum25519_51.h deleted file mode 100644 index 96e5a7d22e..0000000000 --- a/dist/c89-compatible/Hacl_Bignum25519_51.h +++ /dev/null @@ -1,678 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Bignum25519_51_H -#define __Hacl_Bignum25519_51_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -static inline void Hacl_Impl_Curve25519_Field51_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t f10 = f1[0U]; - uint64_t f20 = f2[0U]; - uint64_t f11 = f1[1U]; - uint64_t f21 = f2[1U]; - uint64_t f12 = f1[2U]; - uint64_t f22 = f2[2U]; - uint64_t f13 = f1[3U]; - uint64_t f23 = f2[3U]; - uint64_t f14 = f1[4U]; - uint64_t f24 = f2[4U]; - out[0U] = f10 + f20; - out[1U] = f11 + f21; - out[2U] = f12 + f22; - out[3U] = f13 + f23; - out[4U] = f14 + f24; -} - -static inline void Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t f10 = f1[0U]; - uint64_t f20 = f2[0U]; - uint64_t f11 = f1[1U]; - uint64_t f21 = f2[1U]; - uint64_t f12 = f1[2U]; - uint64_t f22 = f2[2U]; - uint64_t f13 = f1[3U]; - uint64_t f23 = f2[3U]; - uint64_t f14 = f1[4U]; - uint64_t f24 = f2[4U]; - out[0U] = f10 + (uint64_t)0x3fffffffffff68U - f20; - out[1U] = f11 + (uint64_t)0x3ffffffffffff8U - f21; - out[2U] = f12 + (uint64_t)0x3ffffffffffff8U - f22; - out[3U] = f13 + (uint64_t)0x3ffffffffffff8U - f23; - out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24; -} - -static inline void -Hacl_Impl_Curve25519_Field51_fmul( - uint64_t *out, - uint64_t *f1, - uint64_t *f2, - FStar_UInt128_uint128 *uu___ -) -{ - uint64_t f10 = f1[0U]; - uint64_t f11 = f1[1U]; - uint64_t f12 = f1[2U]; - uint64_t f13 = f1[3U]; - uint64_t f14 = f1[4U]; - uint64_t f20 = f2[0U]; - uint64_t f21 = f2[1U]; - uint64_t f22 = f2[2U]; - uint64_t f23 = f2[3U]; - uint64_t f24 = f2[4U]; - uint64_t tmp1 = f21 * (uint64_t)19U; - uint64_t tmp2 = f22 * (uint64_t)19U; - uint64_t tmp3 = f23 * (uint64_t)19U; - uint64_t tmp4 = f24 * (uint64_t)19U; - FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20); - FStar_UInt128_uint128 o10 = FStar_UInt128_mul_wide(f10, f21); - FStar_UInt128_uint128 o20 = FStar_UInt128_mul_wide(f10, f22); - FStar_UInt128_uint128 o30 = FStar_UInt128_mul_wide(f10, f23); - FStar_UInt128_uint128 o40 = FStar_UInt128_mul_wide(f10, f24); - FStar_UInt128_uint128 o01 = FStar_UInt128_add(o00, FStar_UInt128_mul_wide(f11, tmp4)); - FStar_UInt128_uint128 o11 = FStar_UInt128_add(o10, FStar_UInt128_mul_wide(f11, f20)); - FStar_UInt128_uint128 o21 = FStar_UInt128_add(o20, FStar_UInt128_mul_wide(f11, f21)); - FStar_UInt128_uint128 o31 = FStar_UInt128_add(o30, FStar_UInt128_mul_wide(f11, f22)); - FStar_UInt128_uint128 o41 = FStar_UInt128_add(o40, FStar_UInt128_mul_wide(f11, f23)); - FStar_UInt128_uint128 o02 = FStar_UInt128_add(o01, FStar_UInt128_mul_wide(f12, tmp3)); - FStar_UInt128_uint128 o12 = FStar_UInt128_add(o11, FStar_UInt128_mul_wide(f12, tmp4)); - FStar_UInt128_uint128 o22 = FStar_UInt128_add(o21, FStar_UInt128_mul_wide(f12, f20)); - FStar_UInt128_uint128 o32 = FStar_UInt128_add(o31, FStar_UInt128_mul_wide(f12, f21)); - FStar_UInt128_uint128 o42 = FStar_UInt128_add(o41, FStar_UInt128_mul_wide(f12, f22)); - FStar_UInt128_uint128 o03 = FStar_UInt128_add(o02, FStar_UInt128_mul_wide(f13, tmp2)); - FStar_UInt128_uint128 o13 = FStar_UInt128_add(o12, FStar_UInt128_mul_wide(f13, tmp3)); - FStar_UInt128_uint128 o23 = FStar_UInt128_add(o22, FStar_UInt128_mul_wide(f13, tmp4)); - FStar_UInt128_uint128 o33 = FStar_UInt128_add(o32, FStar_UInt128_mul_wide(f13, f20)); - FStar_UInt128_uint128 o43 = 
FStar_UInt128_add(o42, FStar_UInt128_mul_wide(f13, f21)); - FStar_UInt128_uint128 o04 = FStar_UInt128_add(o03, FStar_UInt128_mul_wide(f14, tmp1)); - FStar_UInt128_uint128 o14 = FStar_UInt128_add(o13, FStar_UInt128_mul_wide(f14, tmp2)); - FStar_UInt128_uint128 o24 = FStar_UInt128_add(o23, FStar_UInt128_mul_wide(f14, tmp3)); - FStar_UInt128_uint128 o34 = FStar_UInt128_add(o33, FStar_UInt128_mul_wide(f14, tmp4)); - FStar_UInt128_uint128 o44 = FStar_UInt128_add(o43, FStar_UInt128_mul_wide(f14, f20)); - FStar_UInt128_uint128 tmp_w0 = o04; - FStar_UInt128_uint128 tmp_w1 = o14; - FStar_UInt128_uint128 tmp_w2 = o24; - FStar_UInt128_uint128 tmp_w3 = o34; - FStar_UInt128_uint128 tmp_w4 = o44; - FStar_UInt128_uint128 - l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U)); - uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U)); - FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0)); - uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U)); - FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1)); - uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U)); - FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2)); - uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U)); - FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3)); - uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U)); - uint64_t l_4 = tmp01 + c4 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_4 >> (uint32_t)51U; - uint64_t o0 = tmp0_; - uint64_t o1 = tmp11 + c5; - uint64_t o2 = tmp21; - uint64_t o3 = tmp31; - uint64_t o4 = tmp41; - out[0U] = o0; - out[1U] = o1; - out[2U] = o2; - out[3U] = o3; - out[4U] = o4; -} - -static inline void -Hacl_Impl_Curve25519_Field51_fmul2( - uint64_t *out, - uint64_t *f1, - uint64_t *f2, - FStar_UInt128_uint128 *uu___ -) -{ - uint64_t f10 = f1[0U]; - uint64_t f11 = f1[1U]; - uint64_t f12 = f1[2U]; - uint64_t f13 = f1[3U]; - uint64_t f14 = f1[4U]; - uint64_t f20 = f2[0U]; - uint64_t f21 = f2[1U]; - uint64_t f22 = f2[2U]; - uint64_t f23 = f2[3U]; - uint64_t f24 = f2[4U]; - uint64_t f30 = f1[5U]; - uint64_t f31 = f1[6U]; - uint64_t f32 = f1[7U]; - uint64_t f33 = f1[8U]; - uint64_t f34 = f1[9U]; - uint64_t f40 = f2[5U]; - uint64_t f41 = f2[6U]; - uint64_t f42 = f2[7U]; - uint64_t f43 = f2[8U]; - uint64_t f44 = f2[9U]; - uint64_t tmp11 = f21 * (uint64_t)19U; - uint64_t tmp12 = f22 * (uint64_t)19U; - uint64_t tmp13 = f23 * (uint64_t)19U; - uint64_t tmp14 = f24 * (uint64_t)19U; - uint64_t tmp21 = f41 * (uint64_t)19U; - uint64_t tmp22 = f42 * (uint64_t)19U; - uint64_t tmp23 = f43 * (uint64_t)19U; - uint64_t tmp24 = f44 * (uint64_t)19U; - FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20); - FStar_UInt128_uint128 o15 = FStar_UInt128_mul_wide(f10, f21); - FStar_UInt128_uint128 o25 = 
FStar_UInt128_mul_wide(f10, f22); - FStar_UInt128_uint128 o30 = FStar_UInt128_mul_wide(f10, f23); - FStar_UInt128_uint128 o40 = FStar_UInt128_mul_wide(f10, f24); - FStar_UInt128_uint128 o010 = FStar_UInt128_add(o00, FStar_UInt128_mul_wide(f11, tmp14)); - FStar_UInt128_uint128 o110 = FStar_UInt128_add(o15, FStar_UInt128_mul_wide(f11, f20)); - FStar_UInt128_uint128 o210 = FStar_UInt128_add(o25, FStar_UInt128_mul_wide(f11, f21)); - FStar_UInt128_uint128 o310 = FStar_UInt128_add(o30, FStar_UInt128_mul_wide(f11, f22)); - FStar_UInt128_uint128 o410 = FStar_UInt128_add(o40, FStar_UInt128_mul_wide(f11, f23)); - FStar_UInt128_uint128 o020 = FStar_UInt128_add(o010, FStar_UInt128_mul_wide(f12, tmp13)); - FStar_UInt128_uint128 o120 = FStar_UInt128_add(o110, FStar_UInt128_mul_wide(f12, tmp14)); - FStar_UInt128_uint128 o220 = FStar_UInt128_add(o210, FStar_UInt128_mul_wide(f12, f20)); - FStar_UInt128_uint128 o320 = FStar_UInt128_add(o310, FStar_UInt128_mul_wide(f12, f21)); - FStar_UInt128_uint128 o420 = FStar_UInt128_add(o410, FStar_UInt128_mul_wide(f12, f22)); - FStar_UInt128_uint128 o030 = FStar_UInt128_add(o020, FStar_UInt128_mul_wide(f13, tmp12)); - FStar_UInt128_uint128 o130 = FStar_UInt128_add(o120, FStar_UInt128_mul_wide(f13, tmp13)); - FStar_UInt128_uint128 o230 = FStar_UInt128_add(o220, FStar_UInt128_mul_wide(f13, tmp14)); - FStar_UInt128_uint128 o330 = FStar_UInt128_add(o320, FStar_UInt128_mul_wide(f13, f20)); - FStar_UInt128_uint128 o430 = FStar_UInt128_add(o420, FStar_UInt128_mul_wide(f13, f21)); - FStar_UInt128_uint128 o040 = FStar_UInt128_add(o030, FStar_UInt128_mul_wide(f14, tmp11)); - FStar_UInt128_uint128 o140 = FStar_UInt128_add(o130, FStar_UInt128_mul_wide(f14, tmp12)); - FStar_UInt128_uint128 o240 = FStar_UInt128_add(o230, FStar_UInt128_mul_wide(f14, tmp13)); - FStar_UInt128_uint128 o340 = FStar_UInt128_add(o330, FStar_UInt128_mul_wide(f14, tmp14)); - FStar_UInt128_uint128 o440 = FStar_UInt128_add(o430, FStar_UInt128_mul_wide(f14, f20)); - FStar_UInt128_uint128 tmp_w10 = o040; - FStar_UInt128_uint128 tmp_w11 = o140; - FStar_UInt128_uint128 tmp_w12 = o240; - FStar_UInt128_uint128 tmp_w13 = o340; - FStar_UInt128_uint128 tmp_w14 = o440; - FStar_UInt128_uint128 o0 = FStar_UInt128_mul_wide(f30, f40); - FStar_UInt128_uint128 o1 = FStar_UInt128_mul_wide(f30, f41); - FStar_UInt128_uint128 o2 = FStar_UInt128_mul_wide(f30, f42); - FStar_UInt128_uint128 o3 = FStar_UInt128_mul_wide(f30, f43); - FStar_UInt128_uint128 o4 = FStar_UInt128_mul_wide(f30, f44); - FStar_UInt128_uint128 o01 = FStar_UInt128_add(o0, FStar_UInt128_mul_wide(f31, tmp24)); - FStar_UInt128_uint128 o111 = FStar_UInt128_add(o1, FStar_UInt128_mul_wide(f31, f40)); - FStar_UInt128_uint128 o211 = FStar_UInt128_add(o2, FStar_UInt128_mul_wide(f31, f41)); - FStar_UInt128_uint128 o31 = FStar_UInt128_add(o3, FStar_UInt128_mul_wide(f31, f42)); - FStar_UInt128_uint128 o41 = FStar_UInt128_add(o4, FStar_UInt128_mul_wide(f31, f43)); - FStar_UInt128_uint128 o02 = FStar_UInt128_add(o01, FStar_UInt128_mul_wide(f32, tmp23)); - FStar_UInt128_uint128 o121 = FStar_UInt128_add(o111, FStar_UInt128_mul_wide(f32, tmp24)); - FStar_UInt128_uint128 o221 = FStar_UInt128_add(o211, FStar_UInt128_mul_wide(f32, f40)); - FStar_UInt128_uint128 o32 = FStar_UInt128_add(o31, FStar_UInt128_mul_wide(f32, f41)); - FStar_UInt128_uint128 o42 = FStar_UInt128_add(o41, FStar_UInt128_mul_wide(f32, f42)); - FStar_UInt128_uint128 o03 = FStar_UInt128_add(o02, FStar_UInt128_mul_wide(f33, tmp22)); - FStar_UInt128_uint128 o131 = FStar_UInt128_add(o121, FStar_UInt128_mul_wide(f33, 
tmp23)); - FStar_UInt128_uint128 o231 = FStar_UInt128_add(o221, FStar_UInt128_mul_wide(f33, tmp24)); - FStar_UInt128_uint128 o33 = FStar_UInt128_add(o32, FStar_UInt128_mul_wide(f33, f40)); - FStar_UInt128_uint128 o43 = FStar_UInt128_add(o42, FStar_UInt128_mul_wide(f33, f41)); - FStar_UInt128_uint128 o04 = FStar_UInt128_add(o03, FStar_UInt128_mul_wide(f34, tmp21)); - FStar_UInt128_uint128 o141 = FStar_UInt128_add(o131, FStar_UInt128_mul_wide(f34, tmp22)); - FStar_UInt128_uint128 o241 = FStar_UInt128_add(o231, FStar_UInt128_mul_wide(f34, tmp23)); - FStar_UInt128_uint128 o34 = FStar_UInt128_add(o33, FStar_UInt128_mul_wide(f34, tmp24)); - FStar_UInt128_uint128 o44 = FStar_UInt128_add(o43, FStar_UInt128_mul_wide(f34, f40)); - FStar_UInt128_uint128 tmp_w20 = o04; - FStar_UInt128_uint128 tmp_w21 = o141; - FStar_UInt128_uint128 tmp_w22 = o241; - FStar_UInt128_uint128 tmp_w23 = o34; - FStar_UInt128_uint128 tmp_w24 = o44; - FStar_UInt128_uint128 - l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128((uint64_t)0U)); - uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU; - uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U)); - FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w11, FStar_UInt128_uint64_to_uint128(c00)); - uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU; - uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U)); - FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w12, FStar_UInt128_uint64_to_uint128(c10)); - uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU; - uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U)); - FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w13, FStar_UInt128_uint64_to_uint128(c20)); - uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU; - uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U)); - FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w14, FStar_UInt128_uint64_to_uint128(c30)); - uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU; - uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U)); - uint64_t l_4 = tmp00 + c40 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c50 = l_4 >> (uint32_t)51U; - uint64_t o100 = tmp0_; - uint64_t o112 = tmp10 + c50; - uint64_t o122 = tmp20; - uint64_t o132 = tmp30; - uint64_t o142 = tmp40; - FStar_UInt128_uint128 - l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128((uint64_t)0U)); - uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U)); - FStar_UInt128_uint128 l_6 = FStar_UInt128_add(tmp_w21, FStar_UInt128_uint64_to_uint128(c0)); - uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U)); - FStar_UInt128_uint128 l_7 = FStar_UInt128_add(tmp_w22, FStar_UInt128_uint64_to_uint128(c1)); - uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U)); - FStar_UInt128_uint128 l_8 = FStar_UInt128_add(tmp_w23, FStar_UInt128_uint64_to_uint128(c2)); - uint64_t tmp3 = 
FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U)); - FStar_UInt128_uint128 l_9 = FStar_UInt128_add(tmp_w24, FStar_UInt128_uint64_to_uint128(c3)); - uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U)); - uint64_t l_10 = tmp0 + c4 * (uint64_t)19U; - uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_10 >> (uint32_t)51U; - uint64_t o200 = tmp0_0; - uint64_t o212 = tmp1 + c5; - uint64_t o222 = tmp2; - uint64_t o232 = tmp3; - uint64_t o242 = tmp4; - uint64_t o10 = o100; - uint64_t o11 = o112; - uint64_t o12 = o122; - uint64_t o13 = o132; - uint64_t o14 = o142; - uint64_t o20 = o200; - uint64_t o21 = o212; - uint64_t o22 = o222; - uint64_t o23 = o232; - uint64_t o24 = o242; - out[0U] = o10; - out[1U] = o11; - out[2U] = o12; - out[3U] = o13; - out[4U] = o14; - out[5U] = o20; - out[6U] = o21; - out[7U] = o22; - out[8U] = o23; - out[9U] = o24; -} - -static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f1, uint64_t f2) -{ - uint64_t f10 = f1[0U]; - uint64_t f11 = f1[1U]; - uint64_t f12 = f1[2U]; - uint64_t f13 = f1[3U]; - uint64_t f14 = f1[4U]; - FStar_UInt128_uint128 tmp_w0 = FStar_UInt128_mul_wide(f2, f10); - FStar_UInt128_uint128 tmp_w1 = FStar_UInt128_mul_wide(f2, f11); - FStar_UInt128_uint128 tmp_w2 = FStar_UInt128_mul_wide(f2, f12); - FStar_UInt128_uint128 tmp_w3 = FStar_UInt128_mul_wide(f2, f13); - FStar_UInt128_uint128 tmp_w4 = FStar_UInt128_mul_wide(f2, f14); - FStar_UInt128_uint128 - l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U)); - uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U)); - FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0)); - uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U)); - FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1)); - uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U)); - FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2)); - uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U)); - FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3)); - uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U)); - uint64_t l_4 = tmp0 + c4 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_4 >> (uint32_t)51U; - uint64_t o0 = tmp0_; - uint64_t o1 = tmp1 + c5; - uint64_t o2 = tmp2; - uint64_t o3 = tmp3; - uint64_t o4 = tmp4; - out[0U] = o0; - out[1U] = o1; - out[2U] = o2; - out[3U] = o3; - out[4U] = o4; -} - -static inline void -Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___) -{ - uint64_t f0 = f[0U]; - uint64_t f1 = f[1U]; - 
uint64_t f2 = f[2U]; - uint64_t f3 = f[3U]; - uint64_t f4 = f[4U]; - uint64_t d0 = (uint64_t)2U * f0; - uint64_t d1 = (uint64_t)2U * f1; - uint64_t d2 = (uint64_t)38U * f2; - uint64_t d3 = (uint64_t)19U * f3; - uint64_t d419 = (uint64_t)19U * f4; - uint64_t d4 = (uint64_t)2U * d419; - FStar_UInt128_uint128 - s0 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f0, f0), - FStar_UInt128_mul_wide(d4, f1)), - FStar_UInt128_mul_wide(d2, f3)); - FStar_UInt128_uint128 - s1 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f1), - FStar_UInt128_mul_wide(d4, f2)), - FStar_UInt128_mul_wide(d3, f3)); - FStar_UInt128_uint128 - s2 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f2), - FStar_UInt128_mul_wide(f1, f1)), - FStar_UInt128_mul_wide(d4, f3)); - FStar_UInt128_uint128 - s3 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f3), - FStar_UInt128_mul_wide(d1, f2)), - FStar_UInt128_mul_wide(f4, d419)); - FStar_UInt128_uint128 - s4 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f4), - FStar_UInt128_mul_wide(d1, f3)), - FStar_UInt128_mul_wide(f2, f2)); - FStar_UInt128_uint128 o00 = s0; - FStar_UInt128_uint128 o10 = s1; - FStar_UInt128_uint128 o20 = s2; - FStar_UInt128_uint128 o30 = s3; - FStar_UInt128_uint128 o40 = s4; - FStar_UInt128_uint128 - l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128((uint64_t)0U)); - uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U)); - FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o10, FStar_UInt128_uint64_to_uint128(c0)); - uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U)); - FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o20, FStar_UInt128_uint64_to_uint128(c1)); - uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U)); - FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o30, FStar_UInt128_uint64_to_uint128(c2)); - uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U)); - FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o40, FStar_UInt128_uint64_to_uint128(c3)); - uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U)); - uint64_t l_4 = tmp0 + c4 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_4 >> (uint32_t)51U; - uint64_t o0 = tmp0_; - uint64_t o1 = tmp1 + c5; - uint64_t o2 = tmp2; - uint64_t o3 = tmp3; - uint64_t o4 = tmp4; - out[0U] = o0; - out[1U] = o1; - out[2U] = o2; - out[3U] = o3; - out[4U] = o4; -} - -static inline void -Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___) -{ - uint64_t f10 = f[0U]; - uint64_t f11 = f[1U]; - uint64_t f12 = f[2U]; - uint64_t f13 = f[3U]; - uint64_t f14 = f[4U]; - uint64_t f20 = f[5U]; - uint64_t f21 = f[6U]; - uint64_t f22 = f[7U]; - uint64_t f23 = f[8U]; - uint64_t f24 = f[9U]; - uint64_t d00 = (uint64_t)2U * f10; - uint64_t d10 = (uint64_t)2U * f11; - uint64_t d20 = (uint64_t)38U * f12; - uint64_t d30 = (uint64_t)19U * f13; - uint64_t d4190 = 
(uint64_t)19U * f14; - uint64_t d40 = (uint64_t)2U * d4190; - FStar_UInt128_uint128 - s00 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f10, f10), - FStar_UInt128_mul_wide(d40, f11)), - FStar_UInt128_mul_wide(d20, f13)); - FStar_UInt128_uint128 - s10 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d00, f11), - FStar_UInt128_mul_wide(d40, f12)), - FStar_UInt128_mul_wide(d30, f13)); - FStar_UInt128_uint128 - s20 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d00, f12), - FStar_UInt128_mul_wide(f11, f11)), - FStar_UInt128_mul_wide(d40, f13)); - FStar_UInt128_uint128 - s30 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d00, f13), - FStar_UInt128_mul_wide(d10, f12)), - FStar_UInt128_mul_wide(f14, d4190)); - FStar_UInt128_uint128 - s40 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d00, f14), - FStar_UInt128_mul_wide(d10, f13)), - FStar_UInt128_mul_wide(f12, f12)); - FStar_UInt128_uint128 o100 = s00; - FStar_UInt128_uint128 o110 = s10; - FStar_UInt128_uint128 o120 = s20; - FStar_UInt128_uint128 o130 = s30; - FStar_UInt128_uint128 o140 = s40; - uint64_t d0 = (uint64_t)2U * f20; - uint64_t d1 = (uint64_t)2U * f21; - uint64_t d2 = (uint64_t)38U * f22; - uint64_t d3 = (uint64_t)19U * f23; - uint64_t d419 = (uint64_t)19U * f24; - uint64_t d4 = (uint64_t)2U * d419; - FStar_UInt128_uint128 - s0 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f20, f20), - FStar_UInt128_mul_wide(d4, f21)), - FStar_UInt128_mul_wide(d2, f23)); - FStar_UInt128_uint128 - s1 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f21), - FStar_UInt128_mul_wide(d4, f22)), - FStar_UInt128_mul_wide(d3, f23)); - FStar_UInt128_uint128 - s2 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f22), - FStar_UInt128_mul_wide(f21, f21)), - FStar_UInt128_mul_wide(d4, f23)); - FStar_UInt128_uint128 - s3 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f23), - FStar_UInt128_mul_wide(d1, f22)), - FStar_UInt128_mul_wide(f24, d419)); - FStar_UInt128_uint128 - s4 = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(d0, f24), - FStar_UInt128_mul_wide(d1, f23)), - FStar_UInt128_mul_wide(f22, f22)); - FStar_UInt128_uint128 o200 = s0; - FStar_UInt128_uint128 o210 = s1; - FStar_UInt128_uint128 o220 = s2; - FStar_UInt128_uint128 o230 = s3; - FStar_UInt128_uint128 o240 = s4; - FStar_UInt128_uint128 - l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128((uint64_t)0U)); - uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU; - uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U)); - FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o110, FStar_UInt128_uint64_to_uint128(c00)); - uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU; - uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U)); - FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o120, FStar_UInt128_uint64_to_uint128(c10)); - uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU; - uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U)); - FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o130, FStar_UInt128_uint64_to_uint128(c20)); - uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU; - uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U)); - FStar_UInt128_uint128 l_3 = 
FStar_UInt128_add(o140, FStar_UInt128_uint64_to_uint128(c30)); - uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU; - uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U)); - uint64_t l_4 = tmp00 + c40 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c50 = l_4 >> (uint32_t)51U; - uint64_t o101 = tmp0_; - uint64_t o111 = tmp10 + c50; - uint64_t o121 = tmp20; - uint64_t o131 = tmp30; - uint64_t o141 = tmp40; - FStar_UInt128_uint128 - l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128((uint64_t)0U)); - uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U)); - FStar_UInt128_uint128 l_6 = FStar_UInt128_add(o210, FStar_UInt128_uint64_to_uint128(c0)); - uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U)); - FStar_UInt128_uint128 l_7 = FStar_UInt128_add(o220, FStar_UInt128_uint64_to_uint128(c1)); - uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U)); - FStar_UInt128_uint128 l_8 = FStar_UInt128_add(o230, FStar_UInt128_uint64_to_uint128(c2)); - uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U)); - FStar_UInt128_uint128 l_9 = FStar_UInt128_add(o240, FStar_UInt128_uint64_to_uint128(c3)); - uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U)); - uint64_t l_10 = tmp0 + c4 * (uint64_t)19U; - uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_10 >> (uint32_t)51U; - uint64_t o201 = tmp0_0; - uint64_t o211 = tmp1 + c5; - uint64_t o221 = tmp2; - uint64_t o231 = tmp3; - uint64_t o241 = tmp4; - uint64_t o10 = o101; - uint64_t o11 = o111; - uint64_t o12 = o121; - uint64_t o13 = o131; - uint64_t o14 = o141; - uint64_t o20 = o201; - uint64_t o21 = o211; - uint64_t o22 = o221; - uint64_t o23 = o231; - uint64_t o24 = o241; - out[0U] = o10; - out[1U] = o11; - out[2U] = o12; - out[3U] = o13; - out[4U] = o14; - out[5U] = o20; - out[6U] = o21; - out[7U] = o22; - out[8U] = o23; - out[9U] = o24; -} - -static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint64_t *f) -{ - uint64_t f0 = f[0U]; - uint64_t f1 = f[1U]; - uint64_t f2 = f[2U]; - uint64_t f3 = f[3U]; - uint64_t f4 = f[4U]; - uint64_t l_ = f0 + (uint64_t)0U; - uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = l_ >> (uint32_t)51U; - uint64_t l_0 = f1 + c0; - uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = l_0 >> (uint32_t)51U; - uint64_t l_1 = f2 + c1; - uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = l_1 >> (uint32_t)51U; - uint64_t l_2 = f3 + c2; - uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = l_2 >> (uint32_t)51U; - uint64_t l_3 = f4 + c3; - uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = l_3 >> (uint32_t)51U; - uint64_t l_4 = tmp0 + c4 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_4 >> (uint32_t)51U; - uint64_t f01 = tmp0_; - uint64_t f11 = tmp1 + c5; - 
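/* [Editor's note; not part of the deleted file] The surrounding lines are the
   Field51 carry pass: each limb keeps its low 51 bits, the remainder ripples
   into the next limb, and the carry out of the top limb wraps around
   multiplied by 19, since 2^255 = 19 (mod p) for p = 2^255 - 19. A minimal
   standalone sketch of one pass; the name carry_pass_51 is ours: */
static inline void carry_pass_51(uint64_t f[5])
{
  uint64_t mask51 = (uint64_t)0x7ffffffffffffU; /* 2^51 - 1 */
  uint64_t c = (uint64_t)0U;
  uint64_t l;
  int i;
  for (i = 0; i < 5; i++)
  {
    l = f[i] + c;      /* fold in the incoming carry */
    f[i] = l & mask51; /* keep the low 51 bits */
    c = l >> 51U;      /* ripple the remainder upward */
  }
  l = f[0] + c * (uint64_t)19U; /* wrap the top carry: 2^255 = 19 mod p */
  f[0] = l & mask51;
  f[1] = f[1] + (l >> 51U);
}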
uint64_t f21 = tmp2; - uint64_t f31 = tmp3; - uint64_t f41 = tmp4; - uint64_t m0 = FStar_UInt64_gte_mask(f01, (uint64_t)0x7ffffffffffedU); - uint64_t m1 = FStar_UInt64_eq_mask(f11, (uint64_t)0x7ffffffffffffU); - uint64_t m2 = FStar_UInt64_eq_mask(f21, (uint64_t)0x7ffffffffffffU); - uint64_t m3 = FStar_UInt64_eq_mask(f31, (uint64_t)0x7ffffffffffffU); - uint64_t m4 = FStar_UInt64_eq_mask(f41, (uint64_t)0x7ffffffffffffU); - uint64_t mask = (((m0 & m1) & m2) & m3) & m4; - uint64_t f0_ = f01 - (mask & (uint64_t)0x7ffffffffffedU); - uint64_t f1_ = f11 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f2_ = f21 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f3_ = f31 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f4_ = f41 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f02 = f0_; - uint64_t f12 = f1_; - uint64_t f22 = f2_; - uint64_t f32 = f3_; - uint64_t f42 = f4_; - uint64_t o00 = f02 | f12 << (uint32_t)51U; - uint64_t o10 = f12 >> (uint32_t)13U | f22 << (uint32_t)38U; - uint64_t o20 = f22 >> (uint32_t)26U | f32 << (uint32_t)25U; - uint64_t o30 = f32 >> (uint32_t)39U | f42 << (uint32_t)12U; - uint64_t o0 = o00; - uint64_t o1 = o10; - uint64_t o2 = o20; - uint64_t o3 = o30; - u64s[0U] = o0; - u64s[1U] = o1; - u64s[2U] = o2; - u64s[3U] = o3; -} - -static inline void -Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2) -{ - uint64_t mask = (uint64_t)0U - bit; - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)10U; i++) - { - uint64_t dummy = mask & (p1[i] ^ p2[i]); - p1[i] = p1[i] ^ dummy; - p2[i] = p2[i] ^ dummy; - } -} - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum25519_51_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum256.c b/dist/c89-compatible/Hacl_Bignum256.c deleted file mode 100644 index 432ff87496..0000000000 --- a/dist/c89-compatible/Hacl_Bignum256.c +++ /dev/null @@ -1,1850 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Bignum256.h" - -#include "internal/Hacl_Bignum.h" - -/******************************************************************************* - -A verified 256-bit bignum library. - -This is a 64-bit optimized version, where bignums are represented as an array -of four unsigned 64-bit integers, i.e. uint64_t[4]. Furthermore, the -limbs are stored in little-endian format, i.e. the least significant limb is at -index 0. Each limb is stored in native format in memory. 
Example:
-
-  uint64_t sixteen[4] = { 0x10, 0x00, 0x00, 0x00 }
-
-We strongly encourage users to go through the conversion functions, e.g.
-bn_from_bytes_be, to i) not depend on internal representation choices and ii)
-have the ability to switch easily to a 32-bit optimized version in the future.
-
-*******************************************************************************/
-
-/************************/
-/* Arithmetic functions */
-/************************/
-
-
-/**
-Write `a + b mod 2^256` in `res`.
-
-  This function returns the carry.
-
-  The arguments a, b and res are meant to be 256-bit bignums, i.e. uint64_t[4]
-*/
-uint64_t Hacl_Bignum256_add(uint64_t *a, uint64_t *b, uint64_t *res)
-{
-  uint64_t c = (uint64_t)0U;
-  {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
-    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    {
-      uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-      {
-        uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-        uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-        uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-        {
-          uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-          uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-          uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-          c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
-        }
-      }
-    }
-  }
-  return c;
-}
-
-/**
-Write `a - b mod 2^256` in `res`.
-
-  This function returns the borrow.
-
-  The arguments a, b and res are meant to be 256-bit bignums, i.e. uint64_t[4]
-*/
-uint64_t Hacl_Bignum256_sub(uint64_t *a, uint64_t *b, uint64_t *res)
-{
-  uint64_t c = (uint64_t)0U;
-  {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    {
-      uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-      {
-        uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-        uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-        uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-        {
-          uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-          uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-          uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-          c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
-        }
-      }
-    }
-  }
-  return c;
-}
-
-/**
-Write `(a + b) mod n` in `res`.
-
-  The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4].
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
- • a < n - • b < n -*/ -void Hacl_Bignum256_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - { - uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t12, t2, res_i); - } - } - } - } - c0 = c2; - { - uint64_t tmp[4U] = { 0U }; - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - { - uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t12, t2, res_i); - } - } - } - } - c1 = c3; - c = c0 - c1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = res; - uint64_t x = (c & res[i]) | (~c & tmp[i]); - os[i] = x;); - } -} - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • a < n - • b < n -*/ -void Hacl_Bignum256_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - { - uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t12, t2, res_i); - } - } - } - } - c0 = c2; - { - uint64_t tmp[4U] = { 0U }; - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - { - uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t12, t2, res_i); - } - } - } - } - c1 = c3; - c = (uint64_t)0U - c0; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = res; - uint64_t x = (c & tmp[i]) | (~c & res[i]); - os[i] = x;); - } -} - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. - The outparam res is meant to be a 512-bit bignum, i.e. uint64_t[8]. 
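  As a usage illustration (the operand values below are ours, not from the
  library), a call site could look like:

    uint64_t x[4U] = { 2U, 0U, 0U, 0U };
    uint64_t y[4U] = { 3U, 0U, 0U, 0U };
    uint64_t xy[8U] = { 0U };
    Hacl_Bignum256_mul(x, y, xy);   (xy now holds the 512-bit product, here 6)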
-*/ -void Hacl_Bignum256_mul(uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint32_t i; - memset(res, 0U, (uint32_t)8U * sizeof (uint64_t)); - for (i = (uint32_t)0U; i < (uint32_t)4U; i++) - { - uint64_t bj = b[i]; - uint64_t *res_j = res + i; - uint64_t c = (uint64_t)0U; - uint64_t r; - { - uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); - { - uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); - { - uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); - { - uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); - } - } - } - } - r = c; - res[(uint32_t)4U + i] = r; - } -} - -/** -Write `a * a` in `res`. - - The argument a is meant to be a 256-bit bignum, i.e. uint64_t[4]. - The outparam res is meant to be a 512-bit bignum, i.e. uint64_t[8]. -*/ -void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res) -{ - uint64_t c0; - memset(res, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ab = a; - uint64_t a_j = a[i0]; - uint64_t *res_j = res + i0; - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint64_t a_i = ab[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0); - { - uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1); - { - uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2); - { - uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint64_t a_i = ab[i]; - uint64_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i); - } - } - uint64_t r = c; - res[i0 + i0] = r;); - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res); - { - uint64_t tmp[8U] = { 0U }; - uint64_t c1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]); - uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U)); - uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;); - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res); - } -} - -static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res) -{ - uint32_t i0; - uint32_t j; - uint32_t i; - memset(res, 0U, (uint32_t)4U * sizeof (uint64_t)); - i0 = nBits / (uint32_t)64U; - j = nBits % (uint32_t)64U; - res[i0] = res[i0] | (uint64_t)1U << j; - for (i = (uint32_t)0U; i < (uint32_t)512U - nBits; i++) - 
{ - Hacl_Bignum256_add_mod(n, res, res, res); - } -} - -static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res) -{ - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t qj = nInv * c[i0]; - uint64_t *res_j0 = c + i0; - uint64_t c1 = (uint64_t)0U; - { - uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0); - { - uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1); - { - uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2); - { - uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i); - } - } - } - } - uint64_t r = c1; - uint64_t c10 = r; - uint64_t *resb = c + (uint32_t)4U + i0; - uint64_t res_j = c[(uint32_t)4U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c10, res_j, resb);); - memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t)); - c0 = c00; - { - uint64_t tmp[4U] = { 0U }; - uint64_t c10 = (uint64_t)0U; - uint64_t c1; - uint64_t c2; - { - uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t12, t2, res_i); - } - } - } - } - c1 = c10; - c2 = c0 - c1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = res; - uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); - os[i] = x;); - } -} - -static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a) -{ - uint64_t tmp[8U] = { 0U }; - memcpy(tmp, aM, (uint32_t)4U * sizeof (uint64_t)); - reduction(n, nInv_u64, tmp, a); -} - -static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res) -{ - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t qj = nInv * c[i0]; - uint64_t *res_j0 = c + i0; - uint64_t c1 = (uint64_t)0U; - { - uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0); - { - uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t 
*res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1); - { - uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2); - { - uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i); - } - } - } - } - uint64_t r = c1; - uint64_t c10 = r; - uint64_t *resb = c + (uint32_t)4U + i0; - uint64_t res_j = c[(uint32_t)4U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c10, res_j, resb);); - memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t)); - c0 = c00; - { - uint64_t tmp[4U] = { 0U }; - uint64_t c1 = Hacl_Bignum256_sub(res, n, tmp); - uint64_t m = (uint64_t)0U - c0; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = res; - uint64_t x = (m & tmp[i]) | (~m & res[i]); - os[i] = x;); - } -} - -static inline void -amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM) -{ - uint64_t c[8U] = { 0U }; - memset(c, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t bj = bM[i0]; - uint64_t *res_j = c + i0; - uint64_t c1 = (uint64_t)0U; - { - uint64_t a_i = aM[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c1, res_i0); - { - uint64_t a_i0 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c1, res_i1); - { - uint64_t a_i1 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c1, res_i2); - { - uint64_t a_i2 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c1, res_i); - } - } - } - } - uint64_t r = c1; - c[(uint32_t)4U + i0] = r;); - areduction(n, nInv_u64, c, resM); -} - -static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM) -{ - uint64_t c[8U] = { 0U }; - uint64_t c0; - memset(c, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ab = aM; - uint64_t a_j = aM[i0]; - uint64_t *res_j = c + i0; - uint64_t c1 = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint64_t a_i = ab[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i0); - { - uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c1, res_i1); - { - uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c1, res_i2); - { - uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c1, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / 
(uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint64_t a_i = ab[i]; - uint64_t *res_i = res_j + i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i); - } - } - uint64_t r = c1; - c[i0 + i0] = r;); - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, c, c); - { - uint64_t tmp[8U] = { 0U }; - uint64_t c1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(aM[i], aM[i]); - uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U)); - uint64_t lo = FStar_UInt128_uint128_to_uint64(res); - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;); - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, tmp, c); - areduction(n, nInv_u64, c, resM); - } -} - -static inline void -bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *res) -{ - uint64_t a_mod[4U] = { 0U }; - uint64_t a1[8U] = { 0U }; - memcpy(a1, a, (uint32_t)8U * sizeof (uint64_t)); - { - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t qj = mu * a1[i0]; - uint64_t *res_j0 = a1 + i0; - uint64_t c = (uint64_t)0U; - { - uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0); - { - uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1); - { - uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2); - { - uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i); - } - } - } - } - uint64_t r = c; - uint64_t c1 = r; - uint64_t *resb = a1 + (uint32_t)4U + i0; - uint64_t res_j = a1[(uint32_t)4U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c1, res_j, resb);); - memcpy(a_mod, a1 + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t)); - c0 = c00; - { - uint64_t tmp[4U] = { 0U }; - uint64_t c1 = Hacl_Bignum256_sub(a_mod, n, tmp); - uint64_t m = (uint64_t)0U - c0; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = a_mod; - uint64_t x = (m & tmp[i]) | (~m & a_mod[i]); - os[i] = x;); - { - uint64_t c[8U] = { 0U }; - Hacl_Bignum256_mul(a_mod, r2, c); - reduction(n, mu, c, res); - } - } - } -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 512-bit bignum, i.e. uint64_t[8]. - The argument n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - The function returns false if any of the following preconditions are violated, - true otherwise. 
- • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res) -{ - uint64_t one[4U] = { 0U }; - uint64_t bit0; - uint64_t m0; - memset(one, 0U, (uint32_t)4U * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m0 = (uint64_t)0U - bit0; - { - uint64_t acc = (uint64_t)0U; - uint64_t m1; - uint64_t is_valid_m; - uint32_t nBits; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));); - m1 = acc; - is_valid_m = m0 & m1; - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - uint64_t r2[4U] = { 0U }; - precompr2(nBits, n, r2); - { - uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - bn_slow_precomp(n, mu, r2, a, res); - } - } - else - { - memset(res, 0U, (uint32_t)4U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - } -} - -static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) -{ - uint64_t one[4U] = { 0U }; - uint64_t bit0; - uint64_t m00; - memset(one, 0U, (uint32_t)4U * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m00 = (uint64_t)0U - bit0; - { - uint64_t acc0 = (uint64_t)0U; - uint64_t m10; - uint64_t m0; - uint32_t bLen; - uint64_t m1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));); - m10 = acc0; - m0 = m00 & m10; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - if (bBits < (uint32_t)64U * bLen) - { - KRML_CHECK_SIZE(sizeof (uint64_t), bLen); - { - uint64_t b2[bLen]; - memset(b2, 0U, bLen * sizeof (uint64_t)); - { - uint32_t i0 = bBits / (uint32_t)64U; - uint32_t j = bBits % (uint32_t)64U; - b2[i0] = b2[i0] | (uint64_t)1U << j; - { - uint64_t acc = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - { - uint64_t res = acc; - m1 = res; - } - } - } - } - } - else - { - m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - { - uint64_t acc = (uint64_t)0U; - uint64_t m2; - uint64_t m; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));); - m2 = acc; - m = m1 & m2; - return m0 & m; - } - } -} - -static inline void -exp_vartime_precomp( - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint64_t aM[4U] = { 0U }; - uint64_t c[8U] = { 0U }; - Hacl_Bignum256_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint64_t resM[4U] = { 0U }; - uint64_t ctx[8U] = { 0U }; - uint64_t *ctx_n; - uint64_t *ctx_r2; - memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)4U, r2, 
(uint32_t)4U * sizeof (uint64_t)); - ctx_n = ctx; - ctx_r2 = ctx + (uint32_t)4U; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)64U; - uint32_t j = i % (uint32_t)64U; - uint64_t tmp = b[i1]; - uint64_t bit = tmp >> j & (uint64_t)1U; - if (!(bit == (uint64_t)0U)) - { - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); - } - { - uint64_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); - } - } - } - { - uint64_t tmp[8U] = { 0U }; - memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - { - uint64_t aM[4U] = { 0U }; - uint64_t c[8U] = { 0U }; - Hacl_Bignum256_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint64_t resM[4U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - { - uint64_t ctx[8U] = { 0U }; - memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t)); - { - uint64_t table[64U] = { 0U }; - uint64_t tmp[4U] = { 0U }; - uint64_t *t0 = table; - uint64_t *t1 = table + (uint32_t)4U; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + (uint32_t)4U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U; - uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U, - tmp, - (uint32_t)4U * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U; - uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U, - tmp, - (uint32_t)4U * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint64_t mask_l = (uint64_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i] >> j; - uint64_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = ite & mask_l; - uint32_t bits_l32 = (uint32_t)bits_c; - uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U; - memcpy(resM, a_bits_l, (uint32_t)4U * sizeof (uint64_t)); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + (uint32_t)4U; - from(ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - uint64_t a_bits_l[4U] = { 0U }; - uint32_t bits_l32 = (uint32_t)bits_l; - uint64_t *a_bits_l1 = table + bits_l32 * (uint32_t)4U; - memcpy(a_bits_l, a_bits_l1, (uint32_t)4U * sizeof (uint64_t)); - { - uint64_t *ctx_n = ctx; - 
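/* [Editor's annotation] One step of the 4-bit fixed-window loop: resM was
   squared four times above; bits_l is the next window of the exponent b,
   stitched together across a 64-bit limb boundary when needed; a_bits_l was
   read from the precomputed table by direct indexing, which is acceptable
   here because this variant is documented as *NOT* constant-time in b. The
   multiply below folds the window into the accumulator, in the Montgomery
   domain: resM <- resM * aM^bits_l. */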
amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint64_t tmp0[8U] = { 0U }; - memcpy(tmp0, resM, (uint32_t)4U * sizeof (uint64_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_consttime_precomp( - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint64_t aM[4U] = { 0U }; - uint64_t c[8U] = { 0U }; - Hacl_Bignum256_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint64_t resM[4U] = { 0U }; - uint64_t ctx[8U] = { 0U }; - memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t)); - { - uint64_t sw = (uint64_t)0U; - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + (uint32_t)4U; - uint64_t sw0; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U; - uint64_t tmp = b[i1]; - uint64_t bit = tmp >> j & (uint64_t)1U; - uint64_t sw1 = bit ^ sw; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy;); - { - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); - { - uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); - sw = bit; - } - } - } - } - sw0 = sw; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy;); - { - uint64_t tmp[8U] = { 0U }; - memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - } - { - uint64_t aM[4U] = { 0U }; - uint64_t c0[8U] = { 0U }; - Hacl_Bignum256_mul(a, r2, c0); - reduction(n, mu, c0, aM); - { - uint64_t resM[4U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - { - uint64_t ctx[8U] = { 0U }; - memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t)); - { - uint64_t table[64U] = { 0U }; - uint64_t tmp[4U] = { 0U }; - uint64_t *t0 = table; - uint64_t *t1 = table + (uint32_t)4U; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + (uint32_t)4U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U; - uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U, - tmp, - (uint32_t)4U * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U; - uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U, - tmp, - (uint32_t)4U * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint64_t mask_l = (uint64_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i0] >> j; - uint64_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = 
ite & mask_l; - memcpy(resM, table, (uint32_t)4U * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U)); - uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = resM; - uint64_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x;);); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + (uint32_t)4U; - from(ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - uint64_t a_bits_l[4U] = { 0U }; - memcpy(a_bits_l, table, (uint32_t)4U * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i2 + (uint32_t)1U)); - uint64_t *res_j = table + (i2 + (uint32_t)1U) * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = a_bits_l; - uint64_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x;);); - { - uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint64_t tmp0[8U] = { 0U }; - memcpy(tmp0, resM, (uint32_t)4U * sizeof (uint64_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_vartime( - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t r2[4U] = { 0U }; - uint64_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - exp_vartime_precomp(n, mu, r2, a, bBits, b, res); -} - -static inline void -exp_consttime( - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t r2[4U] = { 0U }; - uint64_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - exp_consttime_precomp(n, mu, r2, a, bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. 
- • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum256_mod_exp_vartime( - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - exp_vartime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)4U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum256_mod_exp_consttime( - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - exp_consttime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)4U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, true otherwise. 
- • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res) -{ - uint64_t one[4U] = { 0U }; - uint64_t bit0; - uint64_t m00; - memset(one, 0U, (uint32_t)4U * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m00 = (uint64_t)0U - bit0; - { - uint64_t acc0 = (uint64_t)0U; - uint64_t m10; - uint64_t m0; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));); - m10 = acc0; - m0 = m00 & m10; - { - uint64_t bn_zero[4U] = { 0U }; - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - uint64_t res10; - uint64_t m1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]); - mask = uu____0 & mask;); - mask1 = mask; - res10 = mask1; - m1 = res10; - { - uint64_t acc = (uint64_t)0U; - uint64_t m2; - uint64_t is_valid_m; - uint32_t nBits; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));); - m2 = acc; - is_valid_m = (m0 & ~m1) & m2; - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - uint64_t n2[4U] = { 0U }; - uint64_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2); - uint64_t *a1 = n + (uint32_t)1U; - uint64_t *res1 = n2 + (uint32_t)1U; - uint64_t c = c0; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);); - { - uint64_t c1 = c; - uint64_t c2 = c1; - exp_vartime(nBits, n, a, (uint32_t)256U, n2, res); - } - } - else - { - memset(res, 0U, (uint32_t)4U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - } - } -} - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be a 256-bit bignum, i.e. uint64_t[4]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum256_mont_ctx_free on the return value - to avoid memory leaks. 
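  A sketch of the intended lifecycle (illustrative only; error handling and
  the initialization of n, a and res are elided):

    Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k = Hacl_Bignum256_mont_ctx_init(n);
    Hacl_Bignum256_mod_precomp(k, a, res);   (the context is reusable across calls)
    Hacl_Bignum256_mont_ctx_free(k);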
-*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum256_mont_ctx_init(uint64_t *n) -{ - uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t)); - uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t)); - uint64_t *r21 = r2; - uint64_t *n11 = n1; - uint32_t nBits; - uint64_t mu; - memcpy(n11, n, (uint32_t)4U * sizeof (uint64_t)); - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n); - precompr2(nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res; - res.len = (uint32_t)4U; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - )); - buf[0U] = res; - return buf; - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_Bignum256_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. -*/ -void Hacl_Bignum256_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 512-bit bignum, i.e. uint64_t[8]. - The outparam res is meant to be a 256-bit bignum, i.e. uint64_t[4]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. -*/ -void -Hacl_Bignum256_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum256_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. 
-
-  This function is constant-time over its argument b, at the cost of a slower
-  execution time than mod_exp_vartime_*.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • b < pow2 bBits
-  • a < n
-*/
-void
-Hacl_Bignum256_mod_exp_consttime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k,
-  uint64_t *a,
-  uint32_t bBits,
-  uint64_t *b,
-  uint64_t *res
-)
-{
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
-  exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res);
-}
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
-  The argument a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4].
-  The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • n is a prime
-  • 0 < a
-  • a < n
-*/
-void
-Hacl_Bignum256_mod_inv_prime_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k,
-  uint64_t *a,
-  uint64_t *res
-)
-{
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
-  uint64_t n2[4U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
-  uint64_t c2 = c0;
-  uint64_t c1;
-  uint64_t c;
-  KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint64_t t1 = a1[i];
-    uint64_t *res_i = res1 + i;
-    c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, (uint64_t)0U, res_i););
-  c1 = c2;
-  c = c1;
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
-}
-
-
-/********************/
-/* Loads and stores */
-/********************/
-
-
-/**
-Load a big-endian bignum from memory.
-
-  The argument b points to len bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
-
-  If the return value is non-null, clients must eventually call free(3) on it to
-  avoid memory leaks.
-*/
-uint64_t *Hacl_Bignum256_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
-{
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
-  {
-    return NULL;
-  }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  {
-    uint64_t
-    *res =
-      (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-        sizeof (uint64_t));
-    if (res == NULL)
-    {
-      return res;
-    }
-    {
-      uint64_t *res1 = res;
-      uint64_t *res2 = res1;
-      uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-      uint32_t tmpLen = (uint32_t)8U * bnLen;
-      KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
-      {
-        uint8_t tmp[tmpLen];
-        memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-        memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-        {
-          uint32_t i;
-          for (i = (uint32_t)0U; i < bnLen; i++)
-          {
-            uint64_t *os = res2;
-            uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
-            uint64_t x = u;
-            os[i] = x;
-          }
-        }
-        return res2;
-      }
-    }
-  }
-}
-
-/**
-Load a little-endian bignum from memory.
-
-  The argument b points to len bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
- - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint64_t *Hacl_Bignum256_new_bn_from_bytes_le(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U); - { - uint64_t - *res = - (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U, - sizeof (uint64_t)); - if (res == NULL) - { - return res; - } - { - uint64_t *res1 = res; - uint64_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++) - { - uint64_t *os = res2; - uint8_t *bj = tmp + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r1 = u; - uint64_t x = r1; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Serialize a bignum into big-endian memory. - - The argument b points to a 256-bit bignum. - The outparam res points to 32 bytes of valid memory. -*/ -void Hacl_Bignum256_bn_to_bytes_be(uint64_t *b, uint8_t *res) -{ - uint8_t tmp[32U] = { 0U }; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_be(res + i * (uint32_t)8U, b[(uint32_t)4U - i - (uint32_t)1U]);); -} - -/** -Serialize a bignum into little-endian memory. - - The argument b points to a 256-bit bignum. - The outparam res points to 32 bytes of valid memory. -*/ -void Hacl_Bignum256_bn_to_bytes_le(uint64_t *b, uint8_t *res) -{ - uint8_t tmp[32U] = { 0U }; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(res + i * (uint32_t)8U, b[i]);); -} - - -/***************/ -/* Comparisons */ -/***************/ - - -/** -Returns 2^64 - 1 if a < b, otherwise returns 0. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. -*/ -uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b) -{ - uint64_t acc = (uint64_t)0U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));); - return acc; -} - -/** -Returns 2^64 - 1 if a = b, otherwise returns 0. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. 
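  The all-zero / all-ones return convention supports branchless selection. A
  hypothetical use (the names x, y and picked are ours):

    uint64_t m = Hacl_Bignum256_eq_mask(a, b);
    uint64_t picked = (m & x) | (~m & y);   (x if a = b, y otherwise, no branch)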
-*/ -uint64_t Hacl_Bignum256_eq_mask(uint64_t *a, uint64_t *b) -{ - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]); - mask = uu____0 & mask;); - mask1 = mask; - return mask1; -} - diff --git a/dist/c89-compatible/Hacl_Bignum256.h b/dist/c89-compatible/Hacl_Bignum256.h deleted file mode 100644 index 165e2df521..0000000000 --- a/dist/c89-compatible/Hacl_Bignum256.h +++ /dev/null @@ -1,400 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum256_H -#define __Hacl_Bignum256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Bignum_Base.h" -#include "Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -/******************************************************************************* - -A verified 256-bit bignum library. - -This is a 64-bit optimized version, where bignums are represented as an array -of four unsigned 64-bit integers, i.e. uint64_t[4]. Furthermore, the -limbs are stored in little-endian format, i.e. the least significant limb is at -index 0. Each limb is stored in native format in memory. Example: - - uint64_t sixteen[4] = { 0x10, 0x00, 0x00, 0x00 } - -We strongly encourage users to go through the conversion functions, e.g. -bn_from_bytes_be, to i) not depend on internal representation choices and ii) -have the ability to switch easily to a 32-bit optimized version in the future. - -*******************************************************************************/ - -/************************/ -/* Arithmetic functions */ -/************************/ - - -/** -Write `a + b mod 2^256` in `res`. - - This function returns the carry. - - The arguments a, b and res are meant to be 256-bit bignums, i.e. uint64_t[4] -*/ -uint64_t Hacl_Bignum256_add(uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `a - b mod 2^256` in `res`. - - This function returns the borrow. - - The arguments a, b and res are meant to be 256-bit bignums, i.e. uint64_t[4] -*/ -uint64_t Hacl_Bignum256_sub(uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `(a + b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e.
uint64_t[4]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum256_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum256_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. - The outparam res is meant to be a 512-bit bignum, i.e. uint64_t[8]. -*/ -void Hacl_Bignum256_mul(uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `a * a` in `res`. - - The argument a is meant to be a 256-bit bignum, i.e. uint64_t[4]. - The outparam res is meant to be a 512-bit bignum, i.e. uint64_t[8]. -*/ -void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res); - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 512-bit bignum, i.e. uint64_t[8]. - The argument n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum256_mod_exp_vartime( - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum256_mod_exp_consttime( - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, true otherwise. 
- • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res); - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be a 256-bit bignum, i.e. uint64_t[4]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum256_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum256_mont_ctx_init(uint64_t *n); - -/** -Deallocate the memory previously allocated by Hacl_Bignum256_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. -*/ -void Hacl_Bignum256_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k); - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 512-bit bignum, i.e. uint64_t[8]. - The outparam res is meant to be a 256-bit bignum, i.e. uint64_t[4]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. -*/ -void -Hacl_Bignum256_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum256_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum256_mod_exp_consttime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ (-1) mod n` in `res`. - - The argument a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. 
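  A typical workflow amortizes the precomputation over several calls (a
  sketch; n, a and res are caller-allocated uint64_t[4] buffers assumed to
  satisfy the preconditions below):

    Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k = Hacl_Bignum256_mont_ctx_init(n);
    Hacl_Bignum256_mod_inv_prime_vartime_precomp(k, a, res);
    Hacl_Bignum256_mont_ctx_free(k);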
- - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n -*/ -void -Hacl_Bignum256_mod_inv_prime_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -); - - -/********************/ -/* Loads and stores */ -/********************/ - - -/** -Load a big-endian bignum from memory. - - The argument b points to len bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint64_t *Hacl_Bignum256_new_bn_from_bytes_be(uint32_t len, uint8_t *b); - -/** -Load a little-endian bignum from memory. - - The argument b points to len bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint64_t *Hacl_Bignum256_new_bn_from_bytes_le(uint32_t len, uint8_t *b); - -/** -Serialize a bignum into big-endian memory. - - The argument b points to a 256-bit bignum. - The outparam res points to 32 bytes of valid memory. -*/ -void Hacl_Bignum256_bn_to_bytes_be(uint64_t *b, uint8_t *res); - -/** -Serialize a bignum into little-endian memory. - - The argument b points to a 256-bit bignum. - The outparam res points to 32 bytes of valid memory. -*/ -void Hacl_Bignum256_bn_to_bytes_le(uint64_t *b, uint8_t *res); - - -/***************/ -/* Comparisons */ -/***************/ - - -/** -Returns 2^64 - 1 if a < b, otherwise returns 0. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. -*/ -uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b); - -/** -Returns 2^64 - 1 if a = b, otherwise returns 0. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. -*/ -uint64_t Hacl_Bignum256_eq_mask(uint64_t *a, uint64_t *b); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum256_32.c b/dist/c89-compatible/Hacl_Bignum256_32.c deleted file mode 100644 index 87b25f0e18..0000000000 --- a/dist/c89-compatible/Hacl_Bignum256_32.c +++ /dev/null @@ -1,1836 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Bignum256_32.h" - -#include "internal/Hacl_Bignum.h" - -/******************************************************************************* - -A verified 256-bit bignum library. - -This is a 32-bit optimized version, where bignums are represented as an array -of eight unsigned 32-bit integers, i.e. uint32_t[8]. Furthermore, the -limbs are stored in little-endian format, i.e. the least significant limb is at -index 0. Each limb is stored in native format in memory. Example: - - uint32_t sixteen[8] = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } - -We strongly encourage users to go through the conversion functions, e.g. -bn_from_bytes_be, to i) not depend on internal representation choices and ii) -have the ability to switch easily to a 64-bit optimized version in the future. - -*******************************************************************************/ - -/************************/ -/* Arithmetic functions */ -/************************/ - - -/** -Write `a + b mod 2^256` in `res`. - - This function returns the carry. - - The arguments a, b and res are meant to be 256-bit bignums, i.e. uint32_t[8] -*/ -uint32_t Hacl_Bignum256_32_add(uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c = (uint32_t)0U; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0); - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1); - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2); - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);); - return c; -} - -/** -Write `a - b mod 2^256` in `res`. - - This function returns the borrow. - - The arguments a, b and res are meant to be 256-bit bignums, i.e.
uint32_t[8] -*/ -uint32_t Hacl_Bignum256_32_sub(uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c = (uint32_t)0U; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0); - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1); - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2); - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);); - return c; -} - -/** -Write `(a + b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum256_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c2 = (uint32_t)0U; - uint32_t c0; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t1, t20, res_i0); - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t10, t21, res_i1); - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t11, t22, res_i2); - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t12, t2, res_i);); - c0 = c2; - { - uint32_t tmp[8U] = { 0U }; - uint32_t c3 = (uint32_t)0U; - uint32_t c1; - uint32_t c; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t1, t20, res_i0); - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t10, t21, res_i1); - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t11, t22, res_i2); - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t12, t2, res_i);); - c1 = c3; - c = c0 - c1; - 
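    /* c0 is the carry of a + b and c1 is the borrow of the trial
       subtraction (a + b) - n, so c is all-ones exactly when a + b < n.
       The masked select below keeps the raw sum in that case and takes the
       reduced value tmp = (a + b) - n otherwise, without branching on the
       operands. */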
KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = res; - uint32_t x = (c & res[i]) | (~c & tmp[i]); - os[i] = x;); - } -} - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum256_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c2 = (uint32_t)0U; - uint32_t c0; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, t20, res_i0); - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t10, t21, res_i1); - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t11, t22, res_i2); - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t12, t2, res_i);); - c0 = c2; - { - uint32_t tmp[8U] = { 0U }; - uint32_t c3 = (uint32_t)0U; - uint32_t c1; - uint32_t c; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t1, t20, res_i0); - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t10, t21, res_i1); - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t11, t22, res_i2); - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t12, t2, res_i);); - c1 = c3; - c = (uint32_t)0U - c0; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = res; - uint32_t x = (c & tmp[i]) | (~c & res[i]); - os[i] = x;); - } -} - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8]. - The outparam res is meant to be a 512-bit bignum, i.e. uint32_t[16]. 
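  For instance, a full modular multiplication can be assembled from this
  primitive and Hacl_Bignum256_32_mod below (a sketch; n is a caller-supplied
  modulus meeting the preconditions of Hacl_Bignum256_32_mod):

    uint32_t prod[16U] = { 0U };
    uint32_t out[8U] = { 0U };
    Hacl_Bignum256_32_mul(a, b, prod);
    Hacl_Bignum256_32_mod(n, prod, out);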
-*/ -void Hacl_Bignum256_32_mul(uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t i; - memset(res, 0U, (uint32_t)16U * sizeof (uint32_t)); - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - uint32_t bj = b[i]; - uint32_t *res_j = res + i; - uint32_t c = (uint32_t)0U; - uint32_t r; - KRML_MAYBE_FOR2(i0, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t a_i = a[(uint32_t)4U * i0]; - uint32_t *res_i0 = res_j + (uint32_t)4U * i0; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0); - uint32_t a_i0 = a[(uint32_t)4U * i0 + (uint32_t)1U]; - uint32_t *res_i1 = res_j + (uint32_t)4U * i0 + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1); - uint32_t a_i1 = a[(uint32_t)4U * i0 + (uint32_t)2U]; - uint32_t *res_i2 = res_j + (uint32_t)4U * i0 + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2); - uint32_t a_i2 = a[(uint32_t)4U * i0 + (uint32_t)3U]; - uint32_t *res_i = res_j + (uint32_t)4U * i0 + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i);); - r = c; - res[(uint32_t)8U + i] = r; - } -} - -/** -Write `a * a` in `res`. - - The argument a is meant to be a 256-bit bignum, i.e. uint32_t[8]. - The outparam res is meant to be a 512-bit bignum, i.e. uint32_t[16]. -*/ -void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res) -{ - uint32_t c0; - memset(res, 0U, (uint32_t)16U * sizeof (uint32_t)); - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *ab = a; - uint32_t a_j = a[i0]; - uint32_t *res_j = res + i0; - uint32_t c = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint32_t a_i = ab[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0); - { - uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1); - { - uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2); - { - uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint32_t a_i = ab[i]; - uint32_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i); - } - } - uint32_t r = c; - res[i0 + i0] = r;); - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, res, res); - { - uint32_t tmp[16U] = { 0U }; - uint32_t c1; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i]; - uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U); - uint32_t lo = (uint32_t)res1; - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;); - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, tmp, res); - } -} - -static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res) -{ - uint32_t i0; - uint32_t j; - uint32_t i; - memset(res, 0U, (uint32_t)8U * sizeof (uint32_t)); - i0 = nBits / (uint32_t)32U; - j = nBits % (uint32_t)32U; - res[i0] = res[i0] | (uint32_t)1U << j; - for (i = (uint32_t)0U; i < (uint32_t)512U - nBits; i++) - { - Hacl_Bignum256_32_add_mod(n, res, res, res); - } -} - -static inline void reduction(uint32_t *n, uint32_t 
nInv, uint32_t *c, uint32_t *res) -{ - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t qj = nInv * c[i0]; - uint32_t *res_j0 = c + i0; - uint32_t c1 = (uint32_t)0U; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);); - uint32_t r = c1; - uint32_t c10 = r; - uint32_t *resb = c + (uint32_t)8U + i0; - uint32_t res_j = c[(uint32_t)8U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c10, res_j, resb);); - memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t)); - c0 = c00; - { - uint32_t tmp[8U] = { 0U }; - uint32_t c10 = (uint32_t)0U; - uint32_t c1; - uint32_t c2; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t1, t20, res_i0); - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t10, t21, res_i1); - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t11, t22, res_i2); - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t12, t2, res_i);); - c1 = c10; - c2 = c0 - c1; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = res; - uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); - os[i] = x;); - } -} - -static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a) -{ - uint32_t tmp[16U] = { 0U }; - memcpy(tmp, aM, (uint32_t)8U * sizeof (uint32_t)); - reduction(n, nInv_u64, tmp, a); -} - -static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res) -{ - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t qj = nInv * c[i0]; - uint32_t *res_j0 = c + i0; - uint32_t c1 = (uint32_t)0U; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + 
(uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);); - uint32_t r = c1; - uint32_t c10 = r; - uint32_t *resb = c + (uint32_t)8U + i0; - uint32_t res_j = c[(uint32_t)8U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c10, res_j, resb);); - memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t)); - c0 = c00; - { - uint32_t tmp[8U] = { 0U }; - uint32_t c1 = Hacl_Bignum256_32_sub(res, n, tmp); - uint32_t m = (uint32_t)0U - c0; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = res; - uint32_t x = (m & tmp[i]) | (~m & res[i]); - os[i] = x;); - } -} - -static inline void -amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM) -{ - uint32_t c[16U] = { 0U }; - memset(c, 0U, (uint32_t)16U * sizeof (uint32_t)); - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t bj = bM[i0]; - uint32_t *res_j = c + i0; - uint32_t c1 = (uint32_t)0U; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t a_i = aM[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c1, res_i0); - uint32_t a_i0 = aM[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c1, res_i1); - uint32_t a_i1 = aM[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c1, res_i2); - uint32_t a_i2 = aM[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c1, res_i);); - uint32_t r = c1; - c[(uint32_t)8U + i0] = r;); - areduction(n, nInv_u64, c, resM); -} - -static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM) -{ - uint32_t c[16U] = { 0U }; - uint32_t c0; - memset(c, 0U, (uint32_t)16U * sizeof (uint32_t)); - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *ab = aM; - uint32_t a_j = aM[i0]; - uint32_t *res_j = c + i0; - uint32_t c1 = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint32_t a_i = ab[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i0); - { - uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c1, res_i1); - { - uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c1, res_i2); - { - uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c1, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint32_t a_i = ab[i]; - uint32_t *res_i = res_j + i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i); - } - } - uint32_t r = c1; - c[i0 + i0] = r;); - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, c, c); - { - uint32_t tmp[16U] = { 0U }; - uint32_t c1; - 
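    /* The nested loops above accumulated each off-diagonal product
       aM[i] * aM[j] (j < i) exactly once, and the addition just performed
       doubled the whole accumulator. The loop below adds the diagonal
       squares aM[i] * aM[i], completing
       a^2 = 2 * sum_{j<i} a_j * a_i * B^(i+j) + sum_i a_i^2 * B^(2i)
       with B = 2^32. */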
KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t res = (uint64_t)aM[i] * (uint64_t)aM[i]; - uint32_t hi = (uint32_t)(res >> (uint32_t)32U); - uint32_t lo = (uint32_t)res; - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;); - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, tmp, c); - areduction(n, nInv_u64, c, resM); - } -} - -static inline void -bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *res) -{ - uint32_t a_mod[8U] = { 0U }; - uint32_t a1[16U] = { 0U }; - memcpy(a1, a, (uint32_t)16U * sizeof (uint32_t)); - { - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t qj = mu * a1[i0]; - uint32_t *res_j0 = a1 + i0; - uint32_t c = (uint32_t)0U; - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0); - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1); - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2); - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);); - uint32_t r = c; - uint32_t c1 = r; - uint32_t *resb = a1 + (uint32_t)8U + i0; - uint32_t res_j = a1[(uint32_t)8U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c1, res_j, resb);); - memcpy(a_mod, a1 + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t)); - c0 = c00; - { - uint32_t tmp[8U] = { 0U }; - uint32_t c1 = Hacl_Bignum256_32_sub(a_mod, n, tmp); - uint32_t m = (uint32_t)0U - c0; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = a_mod; - uint32_t x = (m & tmp[i]) | (~m & a_mod[i]); - os[i] = x;); - { - uint32_t c[16U] = { 0U }; - Hacl_Bignum256_32_mul(a_mod, r2, c); - reduction(n, mu, c, res); - } - } - } -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 512-bit bignum, i.e. uint32_t[16]. - The argument n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - - The function returns false if any of the following preconditions are violated, - true otherwise. 
- • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res) -{ - uint32_t one[8U] = { 0U }; - uint32_t bit0; - uint32_t m0; - memset(one, 0U, (uint32_t)8U * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m0 = (uint32_t)0U - bit0; - { - uint32_t acc = (uint32_t)0U; - uint32_t m1; - uint32_t is_valid_m; - uint32_t nBits; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));); - m1 = acc; - is_valid_m = m0 & m1; - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - uint32_t r2[8U] = { 0U }; - precompr2(nBits, n, r2); - { - uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - bn_slow_precomp(n, mu, r2, a, res); - } - } - else - { - memset(res, 0U, (uint32_t)8U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; - } -} - -static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) -{ - uint32_t one[8U] = { 0U }; - uint32_t bit0; - uint32_t m00; - memset(one, 0U, (uint32_t)8U * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m00 = (uint32_t)0U - bit0; - { - uint32_t acc0 = (uint32_t)0U; - uint32_t m10; - uint32_t m0; - uint32_t bLen; - uint32_t m1; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));); - m10 = acc0; - m0 = m00 & m10; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - if (bBits < (uint32_t)32U * bLen) - { - KRML_CHECK_SIZE(sizeof (uint32_t), bLen); - { - uint32_t b2[bLen]; - memset(b2, 0U, bLen * sizeof (uint32_t)); - { - uint32_t i0 = bBits / (uint32_t)32U; - uint32_t j = bBits % (uint32_t)32U; - b2[i0] = b2[i0] | (uint32_t)1U << j; - { - uint32_t acc = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - { - uint32_t res = acc; - m1 = res; - } - } - } - } - } - else - { - m1 = (uint32_t)0xFFFFFFFFU; - } - { - uint32_t acc = (uint32_t)0U; - uint32_t m2; - uint32_t m; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));); - m2 = acc; - m = m1 & m2; - return m0 & m; - } - } -} - -static inline void -exp_vartime_precomp( - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint32_t aM[8U] = { 0U }; - uint32_t c[16U] = { 0U }; - Hacl_Bignum256_32_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint32_t resM[8U] = { 0U }; - uint32_t ctx[16U] = { 0U }; - uint32_t *ctx_n; - uint32_t *ctx_r2; - memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t)); - ctx_n = ctx; - ctx_r2 = 
ctx + (uint32_t)8U; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)32U; - uint32_t j = i % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - if (!(bit == (uint32_t)0U)) - { - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); - } - { - uint32_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); - } - } - } - { - uint32_t tmp[16U] = { 0U }; - memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - { - uint32_t aM[8U] = { 0U }; - uint32_t c[16U] = { 0U }; - Hacl_Bignum256_32_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint32_t resM[8U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - { - uint32_t ctx[16U] = { 0U }; - memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t)); - { - uint32_t table[128U] = { 0U }; - uint32_t tmp[8U] = { 0U }; - uint32_t *t0 = table; - uint32_t *t1 = table + (uint32_t)8U; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + (uint32_t)8U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U; - uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U, - tmp, - (uint32_t)8U * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U; - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U, - tmp, - (uint32_t)8U * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i] >> j; - uint32_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - uint32_t bits_l32 = bits_c; - uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U; - memcpy(resM, a_bits_l, (uint32_t)8U * sizeof (uint32_t)); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + (uint32_t)8U; - from(ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - uint32_t a_bits_l[8U] = { 0U }; - uint32_t bits_l32 = bits_l; - uint32_t *a_bits_l1 = table + bits_l32 * (uint32_t)8U; - memcpy(a_bits_l, a_bits_l1, (uint32_t)8U * sizeof (uint32_t)); - { - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint32_t tmp0[16U] 
= { 0U }; - memcpy(tmp0, resM, (uint32_t)8U * sizeof (uint32_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_consttime_precomp( - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint32_t aM[8U] = { 0U }; - uint32_t c[16U] = { 0U }; - Hacl_Bignum256_32_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint32_t resM[8U] = { 0U }; - uint32_t ctx[16U] = { 0U }; - memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t)); - { - uint32_t sw = (uint32_t)0U; - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + (uint32_t)8U; - uint32_t sw0; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - uint32_t sw1 = bit ^ sw; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy;); - { - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); - { - uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); - sw = bit; - } - } - } - } - sw0 = sw; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy;); - { - uint32_t tmp[16U] = { 0U }; - memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - } - { - uint32_t aM[8U] = { 0U }; - uint32_t c0[16U] = { 0U }; - Hacl_Bignum256_32_mul(a, r2, c0); - reduction(n, mu, c0, aM); - { - uint32_t resM[8U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - { - uint32_t ctx[16U] = { 0U }; - memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t)); - { - uint32_t table[128U] = { 0U }; - uint32_t tmp[8U] = { 0U }; - uint32_t *t0 = table; - uint32_t *t1 = table + (uint32_t)8U; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + (uint32_t)8U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U; - uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U, - tmp, - (uint32_t)8U * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U; - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U, - tmp, - (uint32_t)8U * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i0] >> j; - uint32_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - memcpy(resM, table, (uint32_t)8U * sizeof (uint32_t)); - 
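              /* Constant-time window lookup: entry 0 was copied
                 unconditionally above; the scan below visits the remaining
                 15 table entries and mask-selects the one matching bits_c,
                 so the memory access pattern is independent of the secret
                 exponent bits. */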
KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U); - uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = resM; - uint32_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x;);); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + (uint32_t)8U; - from(ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - uint32_t a_bits_l[8U] = { 0U }; - memcpy(a_bits_l, table, (uint32_t)8U * sizeof (uint32_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_l, i2 + (uint32_t)1U); - uint32_t *res_j = table + (i2 + (uint32_t)1U) * (uint32_t)8U; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = a_bits_l; - uint32_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x;);); - { - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint32_t tmp0[16U] = { 0U }; - memcpy(tmp0, resM, (uint32_t)8U * sizeof (uint32_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_vartime( - uint32_t nBits, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t r2[8U] = { 0U }; - uint32_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - exp_vartime_precomp(n, mu, r2, a, bBits, b, res); -} - -static inline void -exp_consttime( - uint32_t nBits, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t r2[8U] = { 0U }; - uint32_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - exp_consttime_precomp(n, mu, r2, a, bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. 
- • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum256_32_mod_exp_vartime( - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - exp_vartime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)8U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum256_32_mod_exp_consttime( - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - exp_consttime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)8U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, true otherwise. 
- • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res) -{ - uint32_t one[8U] = { 0U }; - uint32_t bit0; - uint32_t m00; - memset(one, 0U, (uint32_t)8U * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m00 = (uint32_t)0U - bit0; - { - uint32_t acc0 = (uint32_t)0U; - uint32_t m10; - uint32_t m0; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));); - m10 = acc0; - m0 = m00 & m10; - { - uint32_t bn_zero[8U] = { 0U }; - uint32_t mask = (uint32_t)0xFFFFFFFFU; - uint32_t mask1; - uint32_t res10; - uint32_t m1; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]); - mask = uu____0 & mask;); - mask1 = mask; - res10 = mask1; - m1 = res10; - { - uint32_t acc = (uint32_t)0U; - uint32_t m2; - uint32_t is_valid_m; - uint32_t nBits; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));); - m2 = acc; - is_valid_m = (m0 & ~m1) & m2; - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - uint32_t n2[8U] = { 0U }; - uint32_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2); - uint32_t *a1 = n + (uint32_t)1U; - uint32_t *res1 = n2 + (uint32_t)1U; - uint32_t c = c0; - { - uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U]; - uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0); - { - uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1); - { - uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2); - { - uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i); - } - } - } - } - KRML_MAYBE_FOR3(i, - (uint32_t)4U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t t1 = a1[i]; - uint32_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);); - { - uint32_t c1 = c; - uint32_t c2 = c1; - exp_vartime(nBits, n, a, (uint32_t)256U, n2, res); - } - } - else - { - memset(res, 0U, (uint32_t)8U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; - } - } - } -} - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be a 256-bit bignum, i.e. uint32_t[8]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum256_32_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum256_32_mont_ctx_init(uint32_t *n) -{ - uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); - uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); - uint32_t *r21 = r2; - uint32_t *n11 = n1; - uint32_t nBits; - uint32_t mu; - memcpy(n11, n, (uint32_t)8U * sizeof (uint32_t)); - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n); - precompr2(nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res; - res.len = (uint32_t)8U; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - )); - buf[0U] = res; - return buf; - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_Bignum256_32_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum256_32_mont_ctx_init. -*/ -void Hacl_Bignum256_32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 512-bit bignum, i.e. uint32_t[16]. - The outparam res is meant to be a 256-bit bignum, i.e. uint32_t[8]. - The argument k is a montgomery context obtained through Hacl_Bignum256_32_mont_ctx_init. -*/ -void -Hacl_Bignum256_32_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - The argument k is a montgomery context obtained through Hacl_Bignum256_32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum256_32_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - The argument k is a montgomery context obtained through Hacl_Bignum256_32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time.
When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum256_32_mod_exp_consttime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The argument a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n -*/ -void -Hacl_Bignum256_32_mod_inv_prime_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t n2[8U] = { 0U }; - uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2); - uint32_t *a1 = k1.n + (uint32_t)1U; - uint32_t *res1 = n2 + (uint32_t)1U; - uint32_t c2 = c0; - uint32_t c1; - uint32_t c; - { - uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U]; - uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, (uint32_t)0U, res_i0); - { - uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t10, (uint32_t)0U, res_i1); - { - uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t11, (uint32_t)0U, res_i2); - { - uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t12, (uint32_t)0U, res_i); - } - } - } - } - KRML_MAYBE_FOR3(i, - (uint32_t)4U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t t1 = a1[i]; - uint32_t *res_i = res1 + i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, (uint32_t)0U, res_i);); - c1 = c2; - c = c1; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res); -} - - -/********************/ -/* Loads and stores */ -/********************/ - - -/** -Load a bid-endian bignum from memory. - - The argument b points to len bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. 
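
  As a purely hypothetical caller sketch (the buffer and its contents are
  invented here; 32 bytes load into (32 - 1) / 4 + 1 = 8 limbs, i.e. a
  uint32_t[8] bignum, and free(3) releases it):

    uint8_t raw[32] = { 0x10 };
    uint32_t *bn = Hacl_Bignum256_32_new_bn_from_bytes_be((uint32_t)32U, raw);
    if (bn != NULL)
    {
      uint8_t back[32];
      Hacl_Bignum256_32_bn_to_bytes_be(bn, back);
      free(bn);
    }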
-*/ -uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U); - { - uint32_t - *res = - (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U, - sizeof (uint32_t)); - if (res == NULL) - { - return res; - } - { - uint32_t *res1 = res; - uint32_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - uint32_t *os = res2; - uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U); - uint32_t x = u; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Load a little-endian bignum from memory. - - The argument b points to len bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U); - { - uint32_t - *res = - (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U, - sizeof (uint32_t)); - if (res == NULL) - { - return res; - } - { - uint32_t *res1 = res; - uint32_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++) - { - uint32_t *os = res2; - uint8_t *bj = tmp + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r1 = u; - uint32_t x = r1; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Serialize a bignum into big-endian memory. - - The argument b points to a 256-bit bignum. - The outparam res points to 32 bytes of valid memory. -*/ -void Hacl_Bignum256_32_bn_to_bytes_be(uint32_t *b, uint8_t *res) -{ - uint8_t tmp[32U] = { 0U }; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - store32_be(res + i * (uint32_t)4U, b[(uint32_t)8U - i - (uint32_t)1U]);); -} - -/** -Serialize a bignum into little-endian memory. - - The argument b points to a 256-bit bignum. - The outparam res points to 32 bytes of valid memory. -*/ -void Hacl_Bignum256_32_bn_to_bytes_le(uint32_t *b, uint8_t *res) -{ - uint8_t tmp[32U] = { 0U }; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - store32_le(res + i * (uint32_t)4U, b[i]);); -} - - -/***************/ -/* Comparisons */ -/***************/ - - -/** -Returns 2^32 - 1 if a < b, otherwise returns 0. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8]. 
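
  A minimal hypothetical sketch (values invented): with x = 1 and y = 2 below,
  x < y, so the call returns m = 0xFFFFFFFFU.

    uint32_t x[8] = { 1U };
    uint32_t y[8] = { 2U };
    uint32_t m = Hacl_Bignum256_32_lt_mask(x, y);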
-*/ -uint32_t Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b) -{ - uint32_t acc = (uint32_t)0U; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));); - return acc; -} - -/** -Returns 2^32 - 1 if a = b, otherwise returns 0. - - The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8]. -*/ -uint32_t Hacl_Bignum256_32_eq_mask(uint32_t *a, uint32_t *b) -{ - uint32_t mask = (uint32_t)0xFFFFFFFFU; - uint32_t mask1; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]); - mask = uu____0 & mask;); - mask1 = mask; - return mask1; -} - diff --git a/dist/c89-compatible/Hacl_Bignum256_32.h b/dist/c89-compatible/Hacl_Bignum256_32.h deleted file mode 100644 index d5ed1067c8..0000000000 --- a/dist/c89-compatible/Hacl_Bignum256_32.h +++ /dev/null @@ -1,400 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum256_32_H -#define __Hacl_Bignum256_32_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Bignum_Base.h" -#include "Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -/******************************************************************************* - -A verified 256-bit bignum library. - -This is a 32-bit optimized version, where bignums are represented as an array -of eight unsigned 32-bit integers, i.e. uint32_t[8]. Furthermore, the -limbs are stored in little-endian format, i.e. the least significant limb is at -index 0. Each limb is stored in native format in memory. Example: - - uint32_t sixteen[8] = { 0x10; 0x00; 0x00; 0x00; 0x00; 0x00; 0x00; 0x00 } - -We strongly encourage users to go through the conversion functions, e.g. -bn_from_bytes_be, to i) not depend on internal representation choices and ii) -have the ability to switch easily to a 64-bit optimized version in the future. - -*******************************************************************************/ - -/************************/ -/* Arithmetic functions */ -/************************/ - - -/** -Write `a + b mod 2^256` in `res`. 
-
- This function returns the carry.
-
- The arguments a, b and res are meant to be 256-bit bignums, i.e. uint32_t[8].
-*/
-uint32_t Hacl_Bignum256_32_add(uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a - b mod 2^256` in `res`.
-
- This function returns the carry.
-
- The arguments a, b and res are meant to be 256-bit bignums, i.e. uint32_t[8].
-*/
-uint32_t Hacl_Bignum256_32_sub(uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `(a + b) mod n` in `res`.
-
- The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • a < n
- • b < n
-*/
-void Hacl_Bignum256_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `(a - b) mod n` in `res`.
-
- The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • a < n
- • b < n
-*/
-void Hacl_Bignum256_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a * b` in `res`.
-
- The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8].
- The outparam res is meant to be a 512-bit bignum, i.e. uint32_t[16].
-*/
-void Hacl_Bignum256_32_mul(uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a * a` in `res`.
-
- The argument a is meant to be a 256-bit bignum, i.e. uint32_t[8].
- The outparam res is meant to be a 512-bit bignum, i.e. uint32_t[16].
-*/
-void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res);
-
-/**
-Write `a mod n` in `res`.
-
- The argument a is meant to be a 512-bit bignum, i.e. uint32_t[16].
- The argument n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
-
- The function returns false if any of the following preconditions are violated,
- true otherwise.
- • 1 < n
- • n % 2 = 1
-*/
-bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res);
-
-/**
-Write `a ^ b mod n` in `res`.
-
- The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
-
- The argument b is a bignum of any size, and bBits is an upper bound on the
- number of significant bits of b. A tighter bound results in faster execution
- time. When in doubt, the number of bits for the bignum size is always a safe
- default, e.g. if b is a 256-bit bignum, bBits should be 256.
-
- The function is *NOT* constant-time on the argument b. See the
- mod_exp_consttime_* functions for constant-time variants.
-
- The function returns false if any of the following preconditions are violated,
- true otherwise.
- • n % 2 = 1
- • 1 < n
- • b < pow2 bBits
- • a < n
-*/
-bool
-Hacl_Bignum256_32_mod_exp_vartime(
-  uint32_t *n,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ b mod n` in `res`.
-
- The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
-
- The argument b is a bignum of any size, and bBits is an upper bound on the
- number of significant bits of b. A tighter bound results in faster execution
- time. When in doubt, the number of bits for the bignum size is always a safe
- default, e.g. if b is a 256-bit bignum, bBits should be 256.
-
- This function is constant-time over its argument b, at the cost of a slower
- execution time than mod_exp_vartime.
-
- The function returns false if any of the following preconditions are violated,
- true otherwise.
- • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum256_32_mod_exp_consttime( - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -); - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res); - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be a 256-bit bignum, i.e. uint32_t[8]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum256_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum256_32_mont_ctx_init(uint32_t *n); - -/** -Deallocate the memory previously allocated by Hacl_Bignum256_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. -*/ -void Hacl_Bignum256_32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k); - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 512-bit bignum, i.e. uint32_t[16]. - The outparam res is meant to be a 256-bit bignum, i.e. uint32_t[8]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. -*/ -void -Hacl_Bignum256_32_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum256_32_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. - The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. 
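
  For illustration, a hypothetical end-to-end use of the precomputed API (all
  buffers and values below are invented, and chosen to satisfy the
  preconditions listed further down; since 3^5 = 243 ≡ 5 (mod 7), res comes
  back holding 5):

    uint32_t n[8] = { 7U };
    uint32_t a[8] = { 3U };
    uint32_t b[8] = { 5U };
    uint32_t res[8] = { 0U };
    Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
    *k = Hacl_Bignum256_32_mont_ctx_init(n);
    Hacl_Bignum256_32_mod_exp_consttime_precomp(k, a, (uint32_t)256U, b, res);
    Hacl_Bignum256_32_mont_ctx_free(k);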
-
- This function is constant-time over its argument b, at the cost of a slower
- execution time than mod_exp_vartime_*.
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • b < pow2 bBits
- • a < n
-*/
-void
-Hacl_Bignum256_32_mod_exp_consttime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
- The argument a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
- The argument k is a montgomery context obtained through Hacl_Bignum256_32_mont_ctx_init.
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • n is a prime
- • 0 < a
- • a < n
-*/
-void
-Hacl_Bignum256_32_mod_inv_prime_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t *res
-);
-
-
-/********************/
-/* Loads and stores */
-/********************/
-
-
-/**
-Load a big-endian bignum from memory.
-
- The argument b points to len bytes of valid memory.
- The function returns a heap-allocated bignum of size sufficient to hold the
- result of loading b, or NULL if either the allocation failed, or the amount of
- required memory would exceed 4GB.
-
- If the return value is non-null, clients must eventually call free(3) on it to
- avoid memory leaks.
-*/
-uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b);
-
-/**
-Load a little-endian bignum from memory.
-
- The argument b points to len bytes of valid memory.
- The function returns a heap-allocated bignum of size sufficient to hold the
- result of loading b, or NULL if either the allocation failed, or the amount of
- required memory would exceed 4GB.
-
- If the return value is non-null, clients must eventually call free(3) on it to
- avoid memory leaks.
-*/
-uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b);
-
-/**
-Serialize a bignum into big-endian memory.
-
- The argument b points to a 256-bit bignum.
- The outparam res points to 32 bytes of valid memory.
-*/
-void Hacl_Bignum256_32_bn_to_bytes_be(uint32_t *b, uint8_t *res);
-
-/**
-Serialize a bignum into little-endian memory.
-
- The argument b points to a 256-bit bignum.
- The outparam res points to 32 bytes of valid memory.
-*/
-void Hacl_Bignum256_32_bn_to_bytes_le(uint32_t *b, uint8_t *res);
-
-
-/***************/
-/* Comparisons */
-/***************/
-
-
-/**
-Returns 2^32 - 1 if a < b, otherwise returns 0.
-
- The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8].
-*/
-uint32_t Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b);
-
-/**
-Returns 2^32 - 1 if a = b, otherwise returns 0.
-
- The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8].
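
  The all-ones/all-zeroes result is designed for branchless selection. A
  hypothetical sketch (x, y and out are invented): because x ≠ y here, the
  mask is 0, so out receives y limb by limb without a data-dependent branch.

    uint32_t x[8] = { 1U };
    uint32_t y[8] = { 2U };
    uint32_t out[8];
    uint32_t m = Hacl_Bignum256_32_eq_mask(x, y);
    uint32_t i;
    for (i = (uint32_t)0U; i < (uint32_t)8U; i++)
    {
      out[i] = (m & x[i]) | (~m & y[i]);
    }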
-*/ -uint32_t Hacl_Bignum256_32_eq_mask(uint32_t *a, uint32_t *b); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum256_32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum32.c b/dist/c89-compatible/Hacl_Bignum32.c deleted file mode 100644 index 9ebef38c74..0000000000 --- a/dist/c89-compatible/Hacl_Bignum32.c +++ /dev/null @@ -1,1055 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Bignum32.h" - -#include "internal/Hacl_Bignum.h" - -/******************************************************************************* - -A verified bignum library. - -This is a 32-bit optimized version, where bignums are represented as an array -of `len` unsigned 32-bit integers, i.e. uint32_t[len]. - -*******************************************************************************/ - -/************************/ -/* Arithmetic functions */ -/************************/ - - -/** -Write `a + b mod 2 ^ (32 * len)` in `res`. - - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] -*/ -uint32_t Hacl_Bignum32_add(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) -{ - return Hacl_Bignum_Addition_bn_add_eq_len_u32(len, a, b, res); -} - -/** -Write `a - b mod 2 ^ (32 * len)` in `res`. - - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len] -*/ -uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) -{ - return Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, a, b, res); -} - -/** -Write `(a + b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum32_add_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) -{ - Hacl_Bignum_bn_add_mod_n_u32(len, n, a, b, res); -} - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • a < n - • b < n -*/ -void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) -{ - Hacl_Bignum_bn_sub_mod_n_u32(len, n, a, b, res); -} - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. -*/ -void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, b, tmp, res); - } -} - -/** -Write `a * a` in `res`. - - The argument a is meant to be `len` limbs in size, i.e. uint32_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. -*/ -void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, a, tmp, res); - } -} - -static inline void -bn_slow_precomp( - uint32_t len, - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t a_mod[len]; - memset(a_mod, 0U, len * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t a1[len + len]; - memset(a1, 0U, (len + len) * sizeof (uint32_t)); - memcpy(a1, a, (len + len) * sizeof (uint32_t)); - { - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < len; i0++) - { - uint32_t qj = mu * a1[i0]; - uint32_t *res_j0 = a1 + i0; - uint32_t c = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0); - { - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1); - { - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2); - { - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint32_t a_i = n[i]; - uint32_t *res_i = res_j0 + i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i); - } - } - { - uint32_t r = c; - uint32_t c1 = r; - uint32_t *resb = a1 + len + i0; - uint32_t res_j = a1[len + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c1, res_j, resb); - } - } - } - memcpy(a_mod, a1 + len, (len + len - len) * sizeof (uint32_t)); - c0 = c00; - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t tmp0[len]; - memset(tmp0, 0U, len * sizeof (uint32_t)); - { - uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, a_mod, n, tmp0); - uint32_t m = (uint32_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t *os = a_mod; - uint32_t x = (m & tmp0[i]) | (~m & a_mod[i]); - os[i] = x; - } - } - KRML_CHECK_SIZE(sizeof (uint32_t), len + len); - { - uint32_t c[len + len]; - 
memset(c, 0U, (len + len) * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len); - { - uint32_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a_mod, r2, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, res); - } - } - } - } - } - } - } -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t one[len]; - memset(one, 0U, len * sizeof (uint32_t)); - { - uint32_t bit0; - uint32_t m0; - memset(one, 0U, len * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m0 = (uint32_t)0U - bit0; - { - uint32_t acc = (uint32_t)0U; - uint32_t m1; - uint32_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m1 = acc; - is_valid_m = m0 & m1; - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t r2[len]; - memset(r2, 0U, len * sizeof (uint32_t)); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r2); - { - uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - bn_slow_precomp(len, n, mu, r2, a, res); - } - } - } - else - { - memset(res, 0U, len * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; - } - } - } -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum32_mod_exp_vartime( - uint32_t len, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b); - uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, len * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. 
A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum32_mod_exp_consttime( - uint32_t len, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b); - uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32(len, nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, len * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t one[len]; - memset(one, 0U, len * sizeof (uint32_t)); - { - uint32_t bit0; - uint32_t m00; - memset(one, 0U, len * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m00 = (uint32_t)0U - bit0; - { - uint32_t acc0 = (uint32_t)0U; - uint32_t m10; - uint32_t m0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t bn_zero[len]; - memset(bn_zero, 0U, len * sizeof (uint32_t)); - { - uint32_t mask = (uint32_t)0xFFFFFFFFU; - uint32_t mask1; - uint32_t res10; - uint32_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - res10 = mask1; - m1 = res10; - { - uint32_t acc = (uint32_t)0U; - uint32_t m2; - uint32_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m2 = acc; - is_valid_m = (m0 & ~m1) & m2; - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t n2[len]; - memset(n2, 0U, len * sizeof (uint32_t)); - { - uint32_t - c0 = - Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, - n[0U], - (uint32_t)2U, - n2); - uint32_t c1; - if ((uint32_t)1U < len) - { - uint32_t *a1 = n + (uint32_t)1U; - uint32_t *res1 = n2 + (uint32_t)1U; - uint32_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - 
(uint32_t)1U) / (uint32_t)4U; i++) - { - uint32_t t1 = a1[(uint32_t)4U * i]; - uint32_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0); - { - uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1); - { - uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = - Lib_IntTypes_Intrinsics_sub_borrow_u32(c, - t11, - (uint32_t)0U, - res_i2); - { - uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = - Lib_IntTypes_Intrinsics_sub_borrow_u32(c, - t12, - (uint32_t)0U, - res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < len - (uint32_t)1U; - i++) - { - uint32_t t1 = a1[i]; - uint32_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i); - } - } - { - uint32_t c10 = c; - c1 = c10; - } - } - else - { - c1 = c0; - } - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, - nBits, - n, - a, - (uint32_t)32U * len, - n2, - res); - } - } - } - else - { - memset(res, 0U, len * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; - } - } - } - } - } - } -} - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum32_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 -*Hacl_Bignum32_mont_ctx_init(uint32_t len, uint32_t *n) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(len, sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(len, sizeof (uint32_t)); - uint32_t *r21 = r2; - uint32_t *n11 = n1; - uint32_t nBits; - uint32_t mu; - memcpy(n11, n, len * sizeof (uint32_t)); - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res; - res.len = len; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - )); - buf[0U] = res; - return buf; - } - } - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_Bignum32_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. -*/ -void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be `2*len` limbs in size, i.e. 
uint32_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. -*/ -void -Hacl_Bignum32_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - bn_slow_precomp(len1, k1.n, k1.mu, k1.r2, a, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum32_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum32_mod_exp_consttime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • n is a prime - • 0 < a - • a < n -*/ -void -Hacl_Bignum32_mod_inv_prime_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t n2[len1]; - memset(n2, 0U, len1 * sizeof (uint32_t)); - { - uint32_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2); - uint32_t c1; - if ((uint32_t)1U < len1) - { - uint32_t *a1 = k1.n + (uint32_t)1U; - uint32_t *res1 = n2 + (uint32_t)1U; - uint32_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++) - { - uint32_t t1 = a1[(uint32_t)4U * i]; - uint32_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0); - { - uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1); - { - uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2); - { - uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < len1 - (uint32_t)1U; - i++) - { - uint32_t t1 = a1[i]; - uint32_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i); - } - } - { - uint32_t c10 = c; - c1 = c10; - } - } - else - { - c1 = c0; - } - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1, - k1.n, - k1.mu, - k1.r2, - a, - (uint32_t)32U * len1, - n2, - res); - } - } -} - - -/********************/ -/* Loads and stores */ -/********************/ - - -/** -Load a bid-endian bignum from memory. - - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. 
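
  A hypothetical caller sketch (buffer invented; 128 big-endian bytes load
  into (128 - 1) / 4 + 1 = 32 limbs, i.e. a 1024-bit bignum):

    uint8_t raw[128] = { 0x01 };
    uint32_t *bn = Hacl_Bignum32_new_bn_from_bytes_be((uint32_t)128U, raw);
    if (bn != NULL)
    {
      free(bn);
    }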
-*/ -uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U); - { - uint32_t - *res = - (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U, - sizeof (uint32_t)); - if (res == NULL) - { - return res; - } - { - uint32_t *res1 = res; - uint32_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - uint32_t *os = res2; - uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U); - uint32_t x = u; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Load a little-endian bignum from memory. - - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U); - { - uint32_t - *res = - (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U, - sizeof (uint32_t)); - if (res == NULL) - { - return res; - } - { - uint32_t *res1 = res; - uint32_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++) - { - uint32_t *os = res2; - uint8_t *bj = tmp + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r1 = u; - uint32_t x = r1; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Serialize a bignum into big-endian memory. - - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. -*/ -void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res) -{ - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - store32_be(tmp + i * (uint32_t)4U, b[bnLen - i - (uint32_t)1U]); - } - } - memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t)); - } -} - -/** -Serialize a bignum into little-endian memory. - - The argument b points to a bignum of ⌈len / 4⌉ size. - The outparam res points to `len` bytes of valid memory. 
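
  A hypothetical serialization sketch (values invented; bn is ⌈128 / 4⌉ = 32
  limbs, so len = 128 bytes, and the output can be loaded back with
  Hacl_Bignum32_new_bn_from_bytes_le):

    uint32_t bn[32] = { 42U };
    uint8_t out[128];
    Hacl_Bignum32_bn_to_bytes_le((uint32_t)128U, bn, out);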
-*/ -void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res) -{ - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - store32_le(tmp + i * (uint32_t)4U, b[i]); - } - } - memcpy(res, tmp, len * sizeof (uint8_t)); - } -} - - -/***************/ -/* Comparisons */ -/***************/ - - -/** -Returns 2^32 - 1 if a < b, otherwise returns 0. - - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. -*/ -uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b) -{ - uint32_t acc = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - return acc; -} - -/** -Returns 2^32 - 1 if a = b, otherwise returns 0. - - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. -*/ -uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b) -{ - uint32_t mask = (uint32_t)0xFFFFFFFFU; - uint32_t mask1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - return mask1; -} - diff --git a/dist/c89-compatible/Hacl_Bignum32.h b/dist/c89-compatible/Hacl_Bignum32.h deleted file mode 100644 index 0f00a03d71..0000000000 --- a/dist/c89-compatible/Hacl_Bignum32.h +++ /dev/null @@ -1,399 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum32_H -#define __Hacl_Bignum32_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Bignum_Base.h" -#include "Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -typedef Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum32_pbn_mont_ctx_u32; - -/******************************************************************************* - -A verified bignum library. 
-
-This is a 32-bit optimized version, where bignums are represented as an array
-of `len` unsigned 32-bit integers, i.e. uint32_t[len].
-
-*******************************************************************************/
-
-/************************/
-/* Arithmetic functions */
-/************************/
-
-
-/**
-Write `a + b mod 2 ^ (32 * len)` in `res`.
-
- This function returns the carry.
-
- The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-*/
-uint32_t Hacl_Bignum32_add(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a - b mod 2 ^ (32 * len)` in `res`.
-
- This function returns the carry.
-
- The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-*/
-uint32_t Hacl_Bignum32_sub(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `(a + b) mod n` in `res`.
-
- The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • a < n
- • b < n
-*/
-void Hacl_Bignum32_add_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `(a - b) mod n` in `res`.
-
- The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • a < n
- • b < n
-*/
-void Hacl_Bignum32_sub_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a * b` in `res`.
-
- The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len].
- The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len].
-*/
-void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a * a` in `res`.
-
- The argument a is meant to be `len` limbs in size, i.e. uint32_t[len].
- The outparam res is meant to be `2*len` limbs in size, i.e. uint32_t[2*len].
-*/
-void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res);
-
-/**
-Write `a mod n` in `res`.
-
- The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len].
- The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-
- The function returns false if any of the following preconditions are violated,
- true otherwise.
- • 1 < n
- • n % 2 = 1
-*/
-bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res);
-
-/**
-Write `a ^ b mod n` in `res`.
-
- The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-
- The argument b is a bignum of any size, and bBits is an upper bound on the
- number of significant bits of b. A tighter bound results in faster execution
- time. When in doubt, the number of bits for the bignum size is always a safe
- default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
- The function is *NOT* constant-time on the argument b. See the
- mod_exp_consttime_* functions for constant-time variants.
-
- The function returns false if any of the following preconditions are violated,
- true otherwise.
- • n % 2 = 1
- • 1 < n
- • b < pow2 bBits
- • a < n
-*/
-bool
-Hacl_Bignum32_mod_exp_vartime(
-  uint32_t len,
-  uint32_t *n,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ b mod n` in `res`.
- - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum32_mod_exp_consttime( - uint32_t len, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -); - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool -Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res); - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum32_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 -*Hacl_Bignum32_mont_ctx_init(uint32_t len, uint32_t *n); - -/** -Deallocate the memory previously allocated by Hacl_Bignum32_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. -*/ -void Hacl_Bignum32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k); - -/** -Write `a mod n` in `res`. - - The argument a is meant to be `2*len` limbs in size, i.e. uint32_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. -*/ -void -Hacl_Bignum32_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
-  • b < pow2 bBits
-  • a < n
-*/
-void
-Hacl_Bignum32_mod_exp_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-  The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init.
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
-  This function is constant-time over its argument b, at the cost of a slower
-  execution time than mod_exp_vartime_*.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • b < pow2 bBits
-  • a < n
-*/
-void
-Hacl_Bignum32_mod_exp_consttime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
-  The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint32_t[len].
-  The argument k is a montgomery context obtained through Hacl_Bignum32_mont_ctx_init.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • n is a prime
-  • 0 < a
-  • a < n
-*/
-void
-Hacl_Bignum32_mod_inv_prime_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t *res
-);
-
-
-/********************/
-/* Loads and stores */
-/********************/
-
-
-/**
-Load a big-endian bignum from memory.
-
-  The argument b points to `len` bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
-
-  If the return value is non-null, clients must eventually call free(3) on it to
-  avoid memory leaks.
-*/
-uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b);
-
-/**
-Load a little-endian bignum from memory.
-
-  The argument b points to `len` bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
-
-  If the return value is non-null, clients must eventually call free(3) on it to
-  avoid memory leaks.
-*/
-uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b);
-
-/**
-Serialize a bignum into big-endian memory.
-
-  The argument b points to a bignum of ⌈len / 4⌉ limbs in size.
-  The outparam res points to `len` bytes of valid memory.
-*/
-void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res);
-
-/**
-Serialize a bignum into little-endian memory.
-
-  The argument b points to a bignum of ⌈len / 4⌉ limbs in size.
-  The outparam res points to `len` bytes of valid memory.
-*/
-void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res);
-
-
-/***************/
-/* Comparisons */
-/***************/
-
-
-/**
-Returns 2^32 - 1 if a < b, otherwise returns 0.
-
-  The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len].
-*/
-uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b);
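
The mask returned by lt_mask is all-ones (2^32 - 1) or all-zero, so it composes
directly with the branchless masking idiom used throughout this library. A
hypothetical helper (not part of the library; the name select_if_lt is made up)
that selects x when a < b and y otherwise, without branching on secret data:

static void select_if_lt(uint32_t len, uint32_t *a, uint32_t *b,
                         uint32_t *x, uint32_t *y, uint32_t *res)
{
  uint32_t mask = Hacl_Bignum32_lt_mask(len, a, b); /* all-ones iff a < b */
  uint32_t i;
  for (i = (uint32_t)0U; i < len; i++)
  {
    res[i] = (mask & x[i]) | (~mask & y[i]);
  }
}

-
-/**
-Returns 2^32 - 1 if a = b, otherwise returns 0.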
- - The arguments a and b are meant to be `len` limbs in size, i.e. uint32_t[len]. -*/ -uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum4096.c b/dist/c89-compatible/Hacl_Bignum4096.c deleted file mode 100644 index 1f8f1d463a..0000000000 --- a/dist/c89-compatible/Hacl_Bignum4096.c +++ /dev/null @@ -1,1729 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Bignum4096.h" - -#include "internal/Hacl_Bignum.h" - -/******************************************************************************* - -A verified 4096-bit bignum library. - -This is a 64-bit optimized version, where bignums are represented as an array -of sixty four unsigned 64-bit integers, i.e. uint64_t[64]. Furthermore, the -limbs are stored in little-endian format, i.e. the least significant limb is at -index 0. Each limb is stored in native format in memory. Example: - - uint64_t sixteen[64] = { 0x10 } - - (relying on the fact that when an initializer-list is provided, the remainder - of the object gets initialized as if it had static storage duration, i.e. with - zeroes) - -We strongly encourage users to go through the conversion functions, e.g. -bn_from_bytes_be, to i) not depend on internal representation choices and ii) -have the ability to switch easily to a 32-bit optimized version in the future. - -*******************************************************************************/ - -/************************/ -/* Arithmetic functions */ -/************************/ - - -/** -Write `a + b mod 2^4096` in `res`. - - This functions returns the carry. - - The arguments a, b and res are meant to be 4096-bit bignums, i.e. 
uint64_t[64] -*/ -uint64_t Hacl_Bignum4096_add(uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c = (uint64_t)0U; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0); - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1); - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2); - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);); - return c; -} - -/** -Write `a - b mod 2^4096` in `res`. - - This functions returns the carry. - - The arguments a, b and res are meant to be 4096-bit bignums, i.e. uint64_t[64] -*/ -uint64_t Hacl_Bignum4096_sub(uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c = (uint64_t)0U; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);); - return c; -} - -/** -Write `(a + b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • a < n - • b < n -*/ -void Hacl_Bignum4096_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t1, t20, res_i0); - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t10, t21, res_i1); - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t11, t22, res_i2); - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t12, t2, res_i);); - c0 = c2; - { - uint64_t tmp[64U] = { 0U }; - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t1 = res[(uint32_t)4U * i]; - uint64_t t20 = n[(uint32_t)4U * i]; - uint64_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t1, t20, res_i0); - uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t10, t21, res_i1); - uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t11, t22, res_i2); - uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t12, t2, res_i);); - c1 = c3; - c = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t *os = res; - uint64_t x = (c & res[i]) | (~c & tmp[i]); - os[i] = x; - } - } - } -} - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • a < n - • b < n -*/ -void Hacl_Bignum4096_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, t20, res_i0); - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t10, t21, res_i1); - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t11, t22, res_i2); - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t12, t2, res_i);); - c0 = c2; - { - uint64_t tmp[64U] = { 0U }; - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t1 = res[(uint32_t)4U * i]; - uint64_t t20 = n[(uint32_t)4U * i]; - uint64_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, t20, res_i0); - uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t10, t21, res_i1); - uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t11, t22, res_i2); - uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t12, t2, res_i);); - c1 = c3; - c = (uint64_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t *os = res; - uint64_t x = (c & tmp[i]) | (~c & res[i]); - os[i] = x; - } - } - } -} - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64]. - The outparam res is meant to be a 8192-bit bignum, i.e. uint64_t[128]. -*/ -void Hacl_Bignum4096_mul(uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t tmp[256U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, a, b, tmp, res); -} - -/** -Write `a * a` in `res`. - - The argument a is meant to be a 4096-bit bignum, i.e. uint64_t[64]. - The outparam res is meant to be a 8192-bit bignum, i.e. uint64_t[128]. 
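  As an illustrative, hypothetical pattern (buffer names are made up), a caller
  can square a 4096-bit bignum and reduce the 8192-bit result modulo an odd n
  with Hacl_Bignum4096_mod, defined later in this file:

    uint64_t aa[128U] = { 0U };
    uint64_t r[64U] = { 0U };
    bool ok;
    Hacl_Bignum4096_sqr(a, aa);
    ok = Hacl_Bignum4096_mod(n, aa, r);

  Here a and n are uint64_t[64]; ok is false if n is even or n <= 1.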
-*/ -void Hacl_Bignum4096_sqr(uint64_t *a, uint64_t *res) -{ - uint64_t tmp[256U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, a, tmp, res); -} - -static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res) -{ - uint32_t i0; - uint32_t j; - uint32_t i; - memset(res, 0U, (uint32_t)64U * sizeof (uint64_t)); - i0 = nBits / (uint32_t)64U; - j = nBits % (uint32_t)64U; - res[i0] = res[i0] | (uint64_t)1U << j; - for (i = (uint32_t)0U; i < (uint32_t)8192U - nBits; i++) - { - Hacl_Bignum4096_add_mod(n, res, res, res); - } -} - -static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res) -{ - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) - { - uint64_t qj = nInv * c[i0]; - uint64_t *res_j0 = c + i0; - uint64_t c1 = (uint64_t)0U; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t a_i = n[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0); - uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1); - uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2); - uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);); - { - uint64_t r = c1; - uint64_t c10 = r; - uint64_t *resb = c + (uint32_t)64U + i0; - uint64_t res_j = c[(uint32_t)64U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t)); - c0 = c00; - { - uint64_t tmp[64U] = { 0U }; - uint64_t c10 = (uint64_t)0U; - uint64_t c1; - uint64_t c2; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t1 = res[(uint32_t)4U * i]; - uint64_t t20 = n[(uint32_t)4U * i]; - uint64_t *res_i0 = tmp + (uint32_t)4U * i; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t1, t20, res_i0); - uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t10, t21, res_i1); - uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t11, t22, res_i2); - uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c10, t12, t2, res_i);); - c1 = c10; - c2 = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t *os = res; - uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]); - os[i] = x; - } - } - } -} - -static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a) -{ - uint64_t tmp[128U] = { 0U }; - memcpy(tmp, aM, (uint32_t)64U * sizeof (uint64_t)); - reduction(n, nInv_u64, tmp, a); -} - -static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res) -{ - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - { - uint32_t 
i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) - { - uint64_t qj = nInv * c[i0]; - uint64_t *res_j0 = c + i0; - uint64_t c1 = (uint64_t)0U; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t a_i = n[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0); - uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1); - uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2); - uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);); - { - uint64_t r = c1; - uint64_t c10 = r; - uint64_t *resb = c + (uint32_t)64U + i0; - uint64_t res_j = c[(uint32_t)64U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t)); - c0 = c00; - { - uint64_t tmp[64U] = { 0U }; - uint64_t c1 = Hacl_Bignum4096_sub(res, n, tmp); - uint64_t m = (uint64_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t *os = res; - uint64_t x = (m & tmp[i]) | (~m & res[i]); - os[i] = x; - } - } - } -} - -static inline void -amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM) -{ - uint64_t c[128U] = { 0U }; - uint64_t tmp[256U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, aM, bM, tmp, c); - areduction(n, nInv_u64, c, resM); -} - -static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM) -{ - uint64_t c[128U] = { 0U }; - uint64_t tmp[256U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, aM, tmp, c); - areduction(n, nInv_u64, c, resM); -} - -static inline void -bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *res) -{ - uint64_t a_mod[64U] = { 0U }; - uint64_t a1[128U] = { 0U }; - memcpy(a1, a, (uint32_t)128U * sizeof (uint64_t)); - { - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) - { - uint64_t qj = mu * a1[i0]; - uint64_t *res_j0 = a1 + i0; - uint64_t c = (uint64_t)0U; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t a_i = n[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0); - uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1); - uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2); - uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);); - { - uint64_t r = c; - uint64_t c1 = r; - uint64_t *resb = a1 + (uint32_t)64U + i0; - uint64_t res_j = a1[(uint32_t)64U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c1, res_j, resb); - } - } - } - memcpy(a_mod, a1 + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t)); - c0 = c00; - { - 
uint64_t tmp[64U] = { 0U }; - uint64_t c1 = Hacl_Bignum4096_sub(a_mod, n, tmp); - uint64_t m = (uint64_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t *os = a_mod; - uint64_t x = (m & tmp[i]) | (~m & a_mod[i]); - os[i] = x; - } - } - { - uint64_t c[128U] = { 0U }; - Hacl_Bignum4096_mul(a_mod, r2, c); - reduction(n, mu, c, res); - } - } - } -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 8192-bit bignum, i.e. uint64_t[128]. - The argument n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res) -{ - uint64_t one[64U] = { 0U }; - uint64_t bit0; - uint64_t m0; - memset(one, 0U, (uint32_t)64U * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m0 = (uint64_t)0U - bit0; - { - uint64_t acc = (uint64_t)0U; - uint64_t m1; - uint64_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m1 = acc; - is_valid_m = m0 & m1; - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - uint64_t r2[64U] = { 0U }; - precompr2(nBits, n, r2); - { - uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - bn_slow_precomp(n, mu, r2, a, res); - } - } - else - { - memset(res, 0U, (uint32_t)64U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - } -} - -static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b) -{ - uint64_t one[64U] = { 0U }; - uint64_t bit0; - uint64_t m00; - memset(one, 0U, (uint32_t)64U * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m00 = (uint64_t)0U - bit0; - { - uint64_t acc0 = (uint64_t)0U; - uint64_t m10; - uint64_t m0; - uint32_t bLen; - uint64_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = - (beq & acc0) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - if (bBits < (uint32_t)64U * bLen) - { - KRML_CHECK_SIZE(sizeof (uint64_t), bLen); - { - uint64_t b2[bLen]; - memset(b2, 0U, bLen * sizeof (uint64_t)); - { - uint32_t i0 = bBits / (uint32_t)64U; - uint32_t j = bBits % (uint32_t)64U; - b2[i0] = b2[i0] | (uint64_t)1U << j; - { - uint64_t acc = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - { - uint64_t res = acc; - m1 = res; - } - } - } - } - } - else - { - m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - { - uint64_t acc = (uint64_t)0U; - uint64_t m2; - uint64_t m; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t beq = 
FStar_UInt64_eq_mask(a[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m2 = acc; - m = m1 & m2; - return m0 & m; - } - } -} - -static inline void -exp_vartime_precomp( - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint64_t aM[64U] = { 0U }; - uint64_t c[128U] = { 0U }; - Hacl_Bignum4096_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint64_t resM[64U] = { 0U }; - uint64_t ctx[128U] = { 0U }; - uint64_t *ctx_n; - uint64_t *ctx_r2; - memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t)); - ctx_n = ctx; - ctx_r2 = ctx + (uint32_t)64U; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)64U; - uint32_t j = i % (uint32_t)64U; - uint64_t tmp = b[i1]; - uint64_t bit = tmp >> j & (uint64_t)1U; - if (!(bit == (uint64_t)0U)) - { - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); - } - { - uint64_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); - } - } - } - { - uint64_t tmp[128U] = { 0U }; - memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - { - uint64_t aM[64U] = { 0U }; - uint64_t c[128U] = { 0U }; - Hacl_Bignum4096_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint64_t resM[64U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - { - uint64_t ctx[128U] = { 0U }; - memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t)); - { - uint64_t table[1024U] = { 0U }; - uint64_t tmp[64U] = { 0U }; - uint64_t *t0 = table; - uint64_t *t1 = table + (uint32_t)64U; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + (uint32_t)64U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U; - uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U, - tmp, - (uint32_t)64U * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U; - uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U, - tmp, - (uint32_t)64U * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint64_t mask_l = (uint64_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i] >> j; - uint64_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = ite & mask_l; - uint32_t bits_l32 = (uint32_t)bits_c; - uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U; - memcpy(resM, a_bits_l, (uint32_t)64U * sizeof (uint64_t)); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + (uint32_t)64U; - from(ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - 
(uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - uint64_t a_bits_l[64U] = { 0U }; - uint32_t bits_l32 = (uint32_t)bits_l; - uint64_t *a_bits_l1 = table + bits_l32 * (uint32_t)64U; - memcpy(a_bits_l, a_bits_l1, (uint32_t)64U * sizeof (uint64_t)); - { - uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint64_t tmp0[128U] = { 0U }; - memcpy(tmp0, resM, (uint32_t)64U * sizeof (uint64_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_consttime_precomp( - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint64_t aM[64U] = { 0U }; - uint64_t c[128U] = { 0U }; - Hacl_Bignum4096_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint64_t resM[64U] = { 0U }; - uint64_t ctx[128U] = { 0U }; - memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t)); - { - uint64_t sw = (uint64_t)0U; - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + (uint32_t)64U; - uint64_t sw0; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U; - uint64_t tmp = b[i1]; - uint64_t bit = tmp >> j & (uint64_t)1U; - uint64_t sw1 = bit ^ sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - { - uint64_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); - { - uint64_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); - sw = bit; - } - } - } - } - sw0 = sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - { - uint64_t tmp[128U] = { 0U }; - memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - } - { - uint64_t aM[64U] = { 0U }; - uint64_t c0[128U] = { 0U }; - Hacl_Bignum4096_mul(a, r2, c0); - reduction(n, mu, c0, aM); - { - uint64_t resM[64U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - { - uint64_t ctx[128U] = { 0U }; - memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t)); - memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t)); - { - uint64_t table[1024U] = { 0U }; - uint64_t tmp[64U] = { 0U }; - uint64_t *t0 = table; - uint64_t *t1 = table + (uint32_t)64U; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + (uint32_t)64U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U; - uint64_t *ctx_n1 = ctx; - 
amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U, - tmp, - (uint32_t)64U * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U; - uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U, - tmp, - (uint32_t)64U * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint64_t mask_l = (uint64_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i0] >> j; - uint64_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = ite & mask_l; - memcpy(resM, table, (uint32_t)64U * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U)); - uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t *os = resM; - uint64_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x; - } - }); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + (uint32_t)64U; - from(ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - uint64_t a_bits_l[64U] = { 0U }; - memcpy(a_bits_l, table, (uint32_t)64U * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i2 + (uint32_t)1U)); - uint64_t *res_j = table + (i2 + (uint32_t)1U) * (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t *os = a_bits_l; - uint64_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x; - } - }); - { - uint64_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint64_t tmp0[128U] = { 0U }; - memcpy(tmp0, resM, (uint32_t)64U * sizeof (uint64_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_vartime( - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t r2[64U] = { 0U }; - uint64_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - exp_vartime_precomp(n, mu, r2, a, bBits, b, res); -} - -static inline void -exp_consttime( - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t r2[64U] = { 0U }; - uint64_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - exp_consttime_precomp(n, mu, r2, a, bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. 
- - The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum4096_mod_exp_vartime( - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - exp_vartime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)64U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum4096_mod_exp_consttime( - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - exp_consttime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)64U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, true otherwise. 
- • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res) -{ - uint64_t one[64U] = { 0U }; - uint64_t bit0; - uint64_t m00; - memset(one, 0U, (uint32_t)64U * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m00 = (uint64_t)0U - bit0; - { - uint64_t acc0 = (uint64_t)0U; - uint64_t m10; - uint64_t m0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = - (beq & acc0) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - { - uint64_t bn_zero[64U] = { 0U }; - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - uint64_t res10; - uint64_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - res10 = mask1; - m1 = res10; - { - uint64_t acc = (uint64_t)0U; - uint64_t m2; - uint64_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m2 = acc; - is_valid_m = (m0 & ~m1) & m2; - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - uint64_t n2[64U] = { 0U }; - uint64_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2); - uint64_t *a1 = n + (uint32_t)1U; - uint64_t *res1 = n2 + (uint32_t)1U; - uint64_t c = c0; - KRML_MAYBE_FOR15(i, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t t1 = a1[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0); - uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1); - uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2); - uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);); - KRML_MAYBE_FOR3(i, - (uint32_t)60U, - (uint32_t)63U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);); - { - uint64_t c1 = c; - uint64_t c2 = c1; - exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res); - } - } - else - { - memset(res, 0U, (uint32_t)64U * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - } - } -} - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be a 4096-bit bignum, i.e. uint64_t[64]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum4096_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum4096_mont_ctx_init(uint64_t *n) -{ - uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t)); - uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t)); - uint64_t *r21 = r2; - uint64_t *n11 = n1; - uint32_t nBits; - uint64_t mu; - memcpy(n11, n, (uint32_t)64U * sizeof (uint64_t)); - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n); - precompr2(nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res; - res.len = (uint32_t)64U; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - )); - buf[0U] = res; - return buf; - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_Bignum4096_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. -*/ -void Hacl_Bignum4096_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 8192-bit bignum, i.e. uint64_t[128]. - The outparam res is meant to be a 4096-bit bignum, i.e. uint64_t[64]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. -*/ -void -Hacl_Bignum4096_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum4096_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. 
When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum4096_mod_exp_consttime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The argument a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n -*/ -void -Hacl_Bignum4096_mod_inv_prime_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t n2[64U] = { 0U }; - uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2); - uint64_t *a1 = k1.n + (uint32_t)1U; - uint64_t *res1 = n2 + (uint32_t)1U; - uint64_t c2 = c0; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR15(i, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t t1 = a1[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, (uint64_t)0U, res_i0); - uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t10, (uint64_t)0U, res_i1); - uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t11, (uint64_t)0U, res_i2); - uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t12, (uint64_t)0U, res_i);); - KRML_MAYBE_FOR3(i, - (uint32_t)60U, - (uint32_t)63U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, (uint64_t)0U, res_i);); - c1 = c2; - c = c1; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res); -} - - -/********************/ -/* Loads and stores */ -/********************/ - - -/** -Load a bid-endian bignum from memory. - - The argument b points to len bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. 
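  For instance (a hypothetical sketch; bytes names a caller-provided buffer),
  loading a 512-byte big-endian value yields a full 4096-bit bignum:

    uint64_t *n = Hacl_Bignum4096_new_bn_from_bytes_be((uint32_t)512U, bytes);
    if (n != NULL)
    {
      ... use n as a 4096-bit bignum, i.e. uint64_t[64] ...
      free(n);
    }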
-*/ -uint64_t *Hacl_Bignum4096_new_bn_from_bytes_be(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U); - { - uint64_t - *res = - (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U, - sizeof (uint64_t)); - if (res == NULL) - { - return res; - } - { - uint64_t *res1 = res; - uint64_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - uint64_t *os = res2; - uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U); - uint64_t x = u; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Load a little-endian bignum from memory. - - The argument b points to len bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint64_t *Hacl_Bignum4096_new_bn_from_bytes_le(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U); - { - uint64_t - *res = - (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U, - sizeof (uint64_t)); - if (res == NULL) - { - return res; - } - { - uint64_t *res1 = res; - uint64_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++) - { - uint64_t *os = res2; - uint8_t *bj = tmp + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r1 = u; - uint64_t x = r1; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Serialize a bignum into big-endian memory. - - The argument b points to a 4096-bit bignum. - The outparam res points to 512 bytes of valid memory. -*/ -void Hacl_Bignum4096_bn_to_bytes_be(uint64_t *b, uint8_t *res) -{ - uint8_t tmp[512U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - store64_be(res + i * (uint32_t)8U, b[(uint32_t)64U - i - (uint32_t)1U]); - } - } -} - -/** -Serialize a bignum into little-endian memory. - - The argument b points to a 4096-bit bignum. - The outparam res points to 512 bytes of valid memory. -*/ -void Hacl_Bignum4096_bn_to_bytes_le(uint64_t *b, uint8_t *res) -{ - uint8_t tmp[512U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - store64_le(res + i * (uint32_t)8U, b[i]); - } - } -} - - -/***************/ -/* Comparisons */ -/***************/ - - -/** -Returns 2^64 - 1 if a < b, otherwise returns 0. - - The arguments a and b are meant to be 4096-bit bignums, i.e. 
uint64_t[64]. -*/ -uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b) -{ - uint64_t acc = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - return acc; -} - -/** -Returns 2^64 - 1 if a = b, otherwise returns 0. - - The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64]. -*/ -uint64_t Hacl_Bignum4096_eq_mask(uint64_t *a, uint64_t *b) -{ - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - return mask1; -} - diff --git a/dist/c89-compatible/Hacl_Bignum4096.h b/dist/c89-compatible/Hacl_Bignum4096.h deleted file mode 100644 index 8504fdd17b..0000000000 --- a/dist/c89-compatible/Hacl_Bignum4096.h +++ /dev/null @@ -1,404 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum4096_H -#define __Hacl_Bignum4096_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Bignum_Base.h" -#include "Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -/******************************************************************************* - -A verified 4096-bit bignum library. - -This is a 64-bit optimized version, where bignums are represented as an array -of sixty four unsigned 64-bit integers, i.e. uint64_t[64]. Furthermore, the -limbs are stored in little-endian format, i.e. the least significant limb is at -index 0. Each limb is stored in native format in memory. Example: - - uint64_t sixteen[64] = { 0x10 } - - (relying on the fact that when an initializer-list is provided, the remainder - of the object gets initialized as if it had static storage duration, i.e. with - zeroes) - -We strongly encourage users to go through the conversion functions, e.g. -bn_from_bytes_be, to i) not depend on internal representation choices and ii) -have the ability to switch easily to a 32-bit optimized version in the future. 
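  For illustration only, a hypothetical round trip through the conversion
  functions (512 big-endian bytes = 4096 bits; in and out name caller buffers):

    uint64_t *a = Hacl_Bignum4096_new_bn_from_bytes_be((uint32_t)512U, in);
    uint8_t out[512U] = { 0U };
    if (a != NULL)
    {
      Hacl_Bignum4096_bn_to_bytes_be(a, out);
      free(a);
    }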
-
-*******************************************************************************/
-
-/************************/
-/* Arithmetic functions */
-/************************/
-
-
-/**
-Write `a + b mod 2^4096` in `res`.
-
-  This function returns the carry.
-
-  The arguments a, b and res are meant to be 4096-bit bignums, i.e. uint64_t[64]
-*/
-uint64_t Hacl_Bignum4096_add(uint64_t *a, uint64_t *b, uint64_t *res);
-
-/**
-Write `a - b mod 2^4096` in `res`.
-
-  This function returns the borrow.
-
-  The arguments a, b and res are meant to be 4096-bit bignums, i.e. uint64_t[64]
-*/
-uint64_t Hacl_Bignum4096_sub(uint64_t *a, uint64_t *b, uint64_t *res);
-
-/**
-Write `(a + b) mod n` in `res`.
-
-  The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64].
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • a < n
-  • b < n
-*/
-void Hacl_Bignum4096_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res);
-
-/**
-Write `(a - b) mod n` in `res`.
-
-  The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64].
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • a < n
-  • b < n
-*/
-void Hacl_Bignum4096_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res);
-
-/**
-Write `a * b` in `res`.
-
-  The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64].
-  The outparam res is meant to be an 8192-bit bignum, i.e. uint64_t[128].
-*/
-void Hacl_Bignum4096_mul(uint64_t *a, uint64_t *b, uint64_t *res);
-
-/**
-Write `a * a` in `res`.
-
-  The argument a is meant to be a 4096-bit bignum, i.e. uint64_t[64].
-  The outparam res is meant to be an 8192-bit bignum, i.e. uint64_t[128].
-*/
-void Hacl_Bignum4096_sqr(uint64_t *a, uint64_t *res);
-
-/**
-Write `a mod n` in `res`.
-
-  The argument a is meant to be an 8192-bit bignum, i.e. uint64_t[128].
-  The argument n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64].
-
-  The function returns false if any of the following preconditions are violated,
-  true otherwise.
-  • 1 < n
-  • n % 2 = 1
-*/
-bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res);
-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64].
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
-  The function is *NOT* constant-time on the argument b. See the
-  mod_exp_consttime_* functions for constant-time variants.
-
-  The function returns false if any of the following preconditions are violated,
-  true otherwise.
-  • n % 2 = 1
-  • 1 < n
-  • b < pow2 bBits
-  • a < n
-*/
-bool
-Hacl_Bignum4096_mod_exp_vartime(
-  uint64_t *n,
-  uint64_t *a,
-  uint32_t bBits,
-  uint64_t *b,
-  uint64_t *res
-);
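
As a brief, hypothetical sketch of the fixed-width API above (a, b and n are
assumed to be caller-allocated uint64_t[64], with n odd and greater than 1;
nothing here is from the original header):

  uint64_t sum[64U] = { 0U };
  uint64_t prod[128U] = { 0U };
  uint64_t r[64U] = { 0U };
  uint64_t carry;
  bool ok;
  carry = Hacl_Bignum4096_add(a, b, sum); /* carry is 0 or 1 */
  Hacl_Bignum4096_mul(a, b, prod);        /* 8192-bit product */
  ok = Hacl_Bignum4096_mod(n, prod, r);   /* r = (a * b) mod n */

-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64].
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.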
- - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum4096_mod_exp_consttime( - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res); - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be a 4096-bit bignum, i.e. uint64_t[64]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum4096_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum4096_mont_ctx_init(uint64_t *n); - -/** -Deallocate the memory previously allocated by Hacl_Bignum4096_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. -*/ -void Hacl_Bignum4096_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k); - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 8192-bit bignum, i.e. uint64_t[128]. - The outparam res is meant to be a 4096-bit bignum, i.e. uint64_t[64]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. -*/ -void -Hacl_Bignum4096_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum4096_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. 
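-
- (For instance, a hedged sketch of the amortized workflow these precomputed
- variants enable, reusing one context k across calls on the same modulus n:
-
-   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k = Hacl_Bignum4096_mont_ctx_init(n);
-   Hacl_Bignum4096_mod_exp_consttime_precomp(k, a, bBits, b, res);
-   Hacl_Bignum4096_mont_ctx_free(k);)
-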
When in doubt, the number of bits for the bignum size is always a safe
- default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
- This function is constant-time over its argument b, at the cost of a slower
- execution time than mod_exp_vartime_*.
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • b < pow2 bBits
- • a < n
-*/
-void
-Hacl_Bignum4096_mod_exp_consttime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k,
-  uint64_t *a,
-  uint32_t bBits,
-  uint64_t *b,
-  uint64_t *res
-);
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
- The argument a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64].
- The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init.
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • n is a prime
- • 0 < a
- • a < n
-*/
-void
-Hacl_Bignum4096_mod_inv_prime_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k,
-  uint64_t *a,
-  uint64_t *res
-);
-
-
-/********************/
-/* Loads and stores */
-/********************/
-
-
-/**
-Load a big-endian bignum from memory.
-
- The argument b points to len bytes of valid memory.
- The function returns a heap-allocated bignum of size sufficient to hold the
- result of loading b, or NULL if either the allocation failed, or the amount of
- required memory would exceed 4GB.
-
- If the return value is non-null, clients must eventually call free(3) on it to
- avoid memory leaks.
-*/
-uint64_t *Hacl_Bignum4096_new_bn_from_bytes_be(uint32_t len, uint8_t *b);
-
-/**
-Load a little-endian bignum from memory.
-
- The argument b points to len bytes of valid memory.
- The function returns a heap-allocated bignum of size sufficient to hold the
- result of loading b, or NULL if either the allocation failed, or the amount of
- required memory would exceed 4GB.
-
- If the return value is non-null, clients must eventually call free(3) on it to
- avoid memory leaks.
-*/
-uint64_t *Hacl_Bignum4096_new_bn_from_bytes_le(uint32_t len, uint8_t *b);
-
-/**
-Serialize a bignum into big-endian memory.
-
- The argument b points to a 4096-bit bignum.
- The outparam res points to 512 bytes of valid memory.
-*/
-void Hacl_Bignum4096_bn_to_bytes_be(uint64_t *b, uint8_t *res);
-
-/**
-Serialize a bignum into little-endian memory.
-
- The argument b points to a 4096-bit bignum.
- The outparam res points to 512 bytes of valid memory.
-*/
-void Hacl_Bignum4096_bn_to_bytes_le(uint64_t *b, uint8_t *res);
-
-
-/***************/
-/* Comparisons */
-/***************/
-
-
-/**
-Returns 2^64 - 1 if a < b, otherwise returns 0.
-
- The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64].
-*/
-uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b);
-
-/**
-Returns 2^64 - 1 if a = b, otherwise returns 0.
-
- The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64].
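-
- For instance (a hedged sketch; x, y, m and z are hypothetical limb names),
- the all-ones/all-zeroes mask can drive a branchless selection:
-
-   uint64_t m = Hacl_Bignum4096_eq_mask(a, b);
-   uint64_t z = (m & x) | (~m & y);   (z equals x when a = b, and y otherwise)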
-
-*/
-uint64_t Hacl_Bignum4096_eq_mask(uint64_t *a, uint64_t *b);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Bignum4096_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Bignum4096_32.c b/dist/c89-compatible/Hacl_Bignum4096_32.c
deleted file mode 100644
index 5d042240d4..0000000000
--- a/dist/c89-compatible/Hacl_Bignum4096_32.c
+++ /dev/null
@@ -1,1813 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Bignum4096_32.h"
-
-#include "internal/Hacl_Bignum.h"
-
-/*******************************************************************************
-
-A verified 4096-bit bignum library.
-
-This is a 32-bit optimized version, where bignums are represented as an array
-of 128 unsigned 32-bit integers, i.e. uint32_t[128]. Furthermore, the
-limbs are stored in little-endian format, i.e. the least significant limb is at
-index 0. Each limb is stored in native format in memory. Example:
-
-  uint32_t sixteen[128] = { 0x10 }
-
-  (relying on the fact that when an initializer-list is provided, the remainder
-  of the object gets initialized as if it had static storage duration, i.e. with
-  zeroes)
-
-We strongly encourage users to go through the conversion functions, e.g.
-bn_from_bytes_be, to i) not depend on internal representation choices and ii)
-have the ability to switch easily to a 64-bit optimized version in the future.
-
-*******************************************************************************/
-
-/************************/
-/* Arithmetic functions */
-/************************/
-
-
-/**
-Write `a + b mod 2^4096` in `res`.
-
- This function returns the carry.
-
- The arguments a, b and res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-*/
-uint32_t Hacl_Bignum4096_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
-{
-  uint32_t c = (uint32_t)0U;
-  {
-    uint32_t i;
-    for (i = (uint32_t)0U; i < (uint32_t)32U; i++)
-    {
-      uint32_t t1 = a[(uint32_t)4U * i];
-      uint32_t t20 = b[(uint32_t)4U * i];
-      uint32_t *res_i0 = res + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-      {
-        uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-        {
-          uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-          uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-          uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
-          c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-          {
-            uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-            uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-            uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
-            c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
-          }
-        }
-      }
-    }
-  }
-  return c;
-}
-
-/**
-Write `a - b mod 2^4096` in `res`.
-
- This function returns the carry.
-
- The arguments a, b and res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-*/
-uint32_t Hacl_Bignum4096_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
-{
-  uint32_t c = (uint32_t)0U;
-  {
-    uint32_t i;
-    for (i = (uint32_t)0U; i < (uint32_t)32U; i++)
-    {
-      uint32_t t1 = a[(uint32_t)4U * i];
-      uint32_t t20 = b[(uint32_t)4U * i];
-      uint32_t *res_i0 = res + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-      {
-        uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-        {
-          uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-          uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-          uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
-          c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-          {
-            uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-            uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-            uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
-            c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
-          }
-        }
-      }
-    }
-  }
-  return c;
-}
-
-/**
-Write `(a + b) mod n` in `res`.
-
- The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • a < n - • b < n -*/ -void Hacl_Bignum4096_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c2 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t1, t20, res_i0); - { - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t10, t21, res_i1); - { - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t11, t22, res_i2); - { - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u32(c2, t12, t2, res_i); - } - } - } - } - } - c0 = c2; - { - uint32_t tmp[128U] = { 0U }; - uint32_t c3 = (uint32_t)0U; - uint32_t c1; - uint32_t c; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t1, t20, res_i0); - { - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t10, t21, res_i1); - { - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t11, t22, res_i2); - { - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c3, t12, t2, res_i); - } - } - } - } - } - c1 = c3; - c = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t *os = res; - uint32_t x = (c & res[i]) | (~c & tmp[i]); - os[i] = x; - } - } - } -} - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • a < n - • b < n -*/ -void Hacl_Bignum4096_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c2 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, t20, res_i0); - { - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t10, t21, res_i1); - { - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t11, t22, res_i2); - { - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t12, t2, res_i); - } - } - } - } - } - c0 = c2; - { - uint32_t tmp[128U] = { 0U }; - uint32_t c3 = (uint32_t)0U; - uint32_t c1; - uint32_t c; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t1, t20, res_i0); - { - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t10, t21, res_i1); - { - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t11, t22, res_i2); - { - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u32(c3, t12, t2, res_i); - } - } - } - } - } - c1 = c3; - c = (uint32_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t *os = res; - uint32_t x = (c & tmp[i]) | (~c & res[i]); - os[i] = x; - } - } - } -} - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128]. - The outparam res is meant to be a 8192-bit bignum, i.e. uint32_t[256]. -*/ -void Hacl_Bignum4096_32_mul(uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t tmp[512U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, a, b, tmp, res); -} - -/** -Write `a * a` in `res`. - - The argument a is meant to be a 4096-bit bignum, i.e. uint32_t[128]. - The outparam res is meant to be a 8192-bit bignum, i.e. uint32_t[256]. 
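-
- For example (a hedged sketch; wide and ok are hypothetical names), squaring
- into the double-width buffer and then reducing by an odd modulus n with
- Hacl_Bignum4096_32_mod:
-
-   uint32_t wide[256U] = { 0U };
-   Hacl_Bignum4096_32_sqr(a, wide);
-   bool ok = Hacl_Bignum4096_32_mod(n, wide, res);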
-*/ -void Hacl_Bignum4096_32_sqr(uint32_t *a, uint32_t *res) -{ - uint32_t tmp[512U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, a, tmp, res); -} - -static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res) -{ - uint32_t i0; - uint32_t j; - uint32_t i; - memset(res, 0U, (uint32_t)128U * sizeof (uint32_t)); - i0 = nBits / (uint32_t)32U; - j = nBits % (uint32_t)32U; - res[i0] = res[i0] | (uint32_t)1U << j; - for (i = (uint32_t)0U; i < (uint32_t)8192U - nBits; i++) - { - Hacl_Bignum4096_32_add_mod(n, res, res, res); - } -} - -static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res) -{ - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++) - { - uint32_t qj = nInv * c[i0]; - uint32_t *res_j0 = c + i0; - uint32_t c1 = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); - { - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); - { - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); - { - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i); - } - } - } - } - } - { - uint32_t r = c1; - uint32_t c10 = r; - uint32_t *resb = c + (uint32_t)128U + i0; - uint32_t res_j = c[(uint32_t)128U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t)); - c0 = c00; - { - uint32_t tmp[128U] = { 0U }; - uint32_t c10 = (uint32_t)0U; - uint32_t c1; - uint32_t c2; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t t1 = res[(uint32_t)4U * i]; - uint32_t t20 = n[(uint32_t)4U * i]; - uint32_t *res_i0 = tmp + (uint32_t)4U * i; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t1, t20, res_i0); - { - uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t10, t21, res_i1); - { - uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t11, t22, res_i2); - { - uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U; - c10 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c10, t12, t2, res_i); - } - } - } - } - } - c1 = c10; - c2 = c0 - c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t *os = res; - uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]); - os[i] = x; - } - } - } -} - -static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a) -{ - uint32_t tmp[256U] = { 0U }; - memcpy(tmp, aM, (uint32_t)128U * sizeof (uint32_t)); - reduction(n, nInv_u64, tmp, a); -} - -static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, 
uint32_t *res) -{ - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++) - { - uint32_t qj = nInv * c[i0]; - uint32_t *res_j0 = c + i0; - uint32_t c1 = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0); - { - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1); - { - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2); - { - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i); - } - } - } - } - } - { - uint32_t r = c1; - uint32_t c10 = r; - uint32_t *resb = c + (uint32_t)128U + i0; - uint32_t res_j = c[(uint32_t)128U + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u32(c00, c10, res_j, resb); - } - } - } - memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t)); - c0 = c00; - { - uint32_t tmp[128U] = { 0U }; - uint32_t c1 = Hacl_Bignum4096_32_sub(res, n, tmp); - uint32_t m = (uint32_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t *os = res; - uint32_t x = (m & tmp[i]) | (~m & res[i]); - os[i] = x; - } - } - } -} - -static inline void -amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM) -{ - uint32_t c[256U] = { 0U }; - uint32_t tmp[512U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, aM, bM, tmp, c); - areduction(n, nInv_u64, c, resM); -} - -static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM) -{ - uint32_t c[256U] = { 0U }; - uint32_t tmp[512U] = { 0U }; - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, aM, tmp, c); - areduction(n, nInv_u64, c, resM); -} - -static inline void -bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *res) -{ - uint32_t a_mod[128U] = { 0U }; - uint32_t a1[256U] = { 0U }; - memcpy(a1, a, (uint32_t)256U * sizeof (uint32_t)); - { - uint32_t c00 = (uint32_t)0U; - uint32_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++) - { - uint32_t qj = mu * a1[i0]; - uint32_t *res_j0 = a1 + i0; - uint32_t c = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint32_t a_i = n[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j0 + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0); - { - uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1); - { - uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2); - { - uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i); - } - } - } - } - } - { - uint32_t r = c; - uint32_t c1 = r; - uint32_t *resb = a1 + (uint32_t)128U + i0; - uint32_t res_j = a1[(uint32_t)128U + i0]; - c00 = 
Lib_IntTypes_Intrinsics_add_carry_u32(c00, c1, res_j, resb); - } - } - } - memcpy(a_mod, a1 + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t)); - c0 = c00; - { - uint32_t tmp[128U] = { 0U }; - uint32_t c1 = Hacl_Bignum4096_32_sub(a_mod, n, tmp); - uint32_t m = (uint32_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t *os = a_mod; - uint32_t x = (m & tmp[i]) | (~m & a_mod[i]); - os[i] = x; - } - } - { - uint32_t c[256U] = { 0U }; - Hacl_Bignum4096_32_mul(a_mod, r2, c); - reduction(n, mu, c, res); - } - } - } -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 8192-bit bignum, i.e. uint32_t[256]. - The argument n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res) -{ - uint32_t one[128U] = { 0U }; - uint32_t bit0; - uint32_t m0; - memset(one, 0U, (uint32_t)128U * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m0 = (uint32_t)0U - bit0; - { - uint32_t acc = (uint32_t)0U; - uint32_t m1; - uint32_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m1 = acc; - is_valid_m = m0 & m1; - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - uint32_t r2[128U] = { 0U }; - precompr2(nBits, n, r2); - { - uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - bn_slow_precomp(n, mu, r2, a, res); - } - } - else - { - memset(res, 0U, (uint32_t)128U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; - } -} - -static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b) -{ - uint32_t one[128U] = { 0U }; - uint32_t bit0; - uint32_t m00; - memset(one, 0U, (uint32_t)128U * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m00 = (uint32_t)0U - bit0; - { - uint32_t acc0 = (uint32_t)0U; - uint32_t m10; - uint32_t m0; - uint32_t bLen; - uint32_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - if (bBits < (uint32_t)32U * bLen) - { - KRML_CHECK_SIZE(sizeof (uint32_t), bLen); - { - uint32_t b2[bLen]; - memset(b2, 0U, bLen * sizeof (uint32_t)); - { - uint32_t i0 = bBits / (uint32_t)32U; - uint32_t j = bBits % (uint32_t)32U; - b2[i0] = b2[i0] | (uint32_t)1U << j; - { - uint32_t acc = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - { - uint32_t res = acc; - m1 = res; - } - } - } - } - } - else - { - m1 = (uint32_t)0xFFFFFFFFU; - } - { - uint32_t acc = (uint32_t)0U; - uint32_t 
m2; - uint32_t m; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m2 = acc; - m = m1 & m2; - return m0 & m; - } - } -} - -static inline void -exp_vartime_precomp( - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint32_t aM[128U] = { 0U }; - uint32_t c[256U] = { 0U }; - Hacl_Bignum4096_32_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint32_t resM[128U] = { 0U }; - uint32_t ctx[256U] = { 0U }; - uint32_t *ctx_n; - uint32_t *ctx_r2; - memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t)); - ctx_n = ctx; - ctx_r2 = ctx + (uint32_t)128U; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)32U; - uint32_t j = i % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - if (!(bit == (uint32_t)0U)) - { - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, resM, aM, resM); - } - { - uint32_t *ctx_n0 = ctx; - amont_sqr(ctx_n0, mu, aM, aM); - } - } - } - { - uint32_t tmp[256U] = { 0U }; - memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - { - uint32_t aM[128U] = { 0U }; - uint32_t c[256U] = { 0U }; - Hacl_Bignum4096_32_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint32_t resM[128U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - { - uint32_t ctx[256U] = { 0U }; - memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t)); - { - uint32_t table[2048U] = { 0U }; - uint32_t tmp[128U] = { 0U }; - uint32_t *t0 = table; - uint32_t *t1 = table + (uint32_t)128U; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + (uint32_t)128U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U; - uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U, - tmp, - (uint32_t)128U * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U; - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U, - tmp, - (uint32_t)128U * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i] >> j; - uint32_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - uint32_t bits_l32 = bits_c; - uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U; - memcpy(resM, a_bits_l, (uint32_t)128U * sizeof (uint32_t)); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + (uint32_t)128U; - from(ctx_n, mu, ctx_r2, resM); - } - { - 
uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - uint32_t a_bits_l[128U] = { 0U }; - uint32_t bits_l32 = bits_l; - uint32_t *a_bits_l1 = table + bits_l32 * (uint32_t)128U; - memcpy(a_bits_l, a_bits_l1, (uint32_t)128U * sizeof (uint32_t)); - { - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint32_t tmp0[256U] = { 0U }; - memcpy(tmp0, resM, (uint32_t)128U * sizeof (uint32_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_consttime_precomp( - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - if (bBits < (uint32_t)200U) - { - uint32_t aM[128U] = { 0U }; - uint32_t c[256U] = { 0U }; - Hacl_Bignum4096_32_mul(a, r2, c); - reduction(n, mu, c, aM); - { - uint32_t resM[128U] = { 0U }; - uint32_t ctx[256U] = { 0U }; - memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t)); - { - uint32_t sw = (uint32_t)0U; - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + (uint32_t)128U; - uint32_t sw0; - from(ctx_n, mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - uint32_t sw1 = bit ^ sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - { - uint32_t *ctx_n0 = ctx; - amont_mul(ctx_n0, mu, aM, resM, aM); - { - uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, resM, resM); - sw = bit; - } - } - } - } - sw0 = sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]); - resM[i] = resM[i] ^ dummy; - aM[i] = aM[i] ^ dummy; - } - } - { - uint32_t tmp[256U] = { 0U }; - memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t)); - reduction(n, mu, tmp, res); - return; - } - } - } - } - { - uint32_t aM[128U] = { 0U }; - uint32_t c0[256U] = { 0U }; - Hacl_Bignum4096_32_mul(a, r2, c0); - reduction(n, mu, c0, aM); - { - uint32_t resM[128U] = { 0U }; - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - { - uint32_t ctx[256U] = { 0U }; - memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t)); - memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t)); - { - uint32_t table[2048U] = { 0U }; - uint32_t tmp[128U] = { 0U }; - uint32_t *t0 = table; - uint32_t *t1 = table + (uint32_t)128U; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + (uint32_t)128U; - from(ctx_n0, mu, ctx_r20, t0); - memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - 
(uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U; - uint32_t *ctx_n1 = ctx; - amont_sqr(ctx_n1, mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U, - tmp, - (uint32_t)128U * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U; - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, aM, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U, - tmp, - (uint32_t)128U * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i0] >> j; - uint32_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - memcpy(resM, table, (uint32_t)128U * sizeof (uint32_t)); - KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U); - uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t *os = resM; - uint32_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x; - } - }); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + (uint32_t)128U; - from(ctx_n, mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - amont_sqr(ctx_n, mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - uint32_t a_bits_l[128U] = { 0U }; - memcpy(a_bits_l, table, (uint32_t)128U * sizeof (uint32_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_l, i2 + (uint32_t)1U); - uint32_t *res_j = table + (i2 + (uint32_t)1U) * (uint32_t)128U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t *os = a_bits_l; - uint32_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x; - } - }); - { - uint32_t *ctx_n = ctx; - amont_mul(ctx_n, mu, resM, a_bits_l, resM); - } - } - } - } - } - { - uint32_t tmp0[256U] = { 0U }; - memcpy(tmp0, resM, (uint32_t)128U * sizeof (uint32_t)); - reduction(n, mu, tmp0, res); - } - } - } - } - } -} - -static inline void -exp_vartime( - uint32_t nBits, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t r2[128U] = { 0U }; - uint32_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - exp_vartime_precomp(n, mu, r2, a, bBits, b, res); -} - -static inline void -exp_consttime( - uint32_t nBits, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t r2[128U] = { 0U }; - uint32_t mu; - precompr2(nBits, n, r2); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - exp_consttime_precomp(n, mu, r2, a, 
bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum4096_32_mod_exp_vartime( - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - exp_vartime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)128U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum4096_32_mod_exp_consttime( - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - uint32_t is_valid_m = exp_check(n, a, bBits, b); - uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - exp_consttime(nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, (uint32_t)128U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, true otherwise. 
- • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res) -{ - uint32_t one[128U] = { 0U }; - uint32_t bit0; - uint32_t m00; - memset(one, 0U, (uint32_t)128U * sizeof (uint32_t)); - one[0U] = (uint32_t)1U; - bit0 = n[0U] & (uint32_t)1U; - m00 = (uint32_t)0U - bit0; - { - uint32_t acc0 = (uint32_t)0U; - uint32_t m10; - uint32_t m0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]); - acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - { - uint32_t bn_zero[128U] = { 0U }; - uint32_t mask = (uint32_t)0xFFFFFFFFU; - uint32_t mask1; - uint32_t res10; - uint32_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - res10 = mask1; - m1 = res10; - { - uint32_t acc = (uint32_t)0U; - uint32_t m2; - uint32_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]); - uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))); - } - } - m2 = acc; - is_valid_m = (m0 & ~m1) & m2; - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n); - if (is_valid_m == (uint32_t)0xFFFFFFFFU) - { - uint32_t n2[128U] = { 0U }; - uint32_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2); - uint32_t *a1 = n + (uint32_t)1U; - uint32_t *res1 = n2 + (uint32_t)1U; - uint32_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)31U; i++) - { - uint32_t t1 = a1[(uint32_t)4U * i]; - uint32_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0); - { - uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1); - { - uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2); - { - uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i); - } - } - } - } - } - KRML_MAYBE_FOR3(i, - (uint32_t)124U, - (uint32_t)127U, - (uint32_t)1U, - uint32_t t1 = a1[i]; - uint32_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);); - { - uint32_t c1 = c; - uint32_t c2 = c1; - exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res); - } - } - else - { - memset(res, 0U, (uint32_t)128U * sizeof (uint32_t)); - } - return is_valid_m == (uint32_t)0xFFFFFFFFU; - } - } - } -} - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be a 4096-bit bignum, i.e. uint32_t[128]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum4096_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum4096_32_mont_ctx_init(uint32_t *n) -{ - uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t)); - uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t)); - uint32_t *r21 = r2; - uint32_t *n11 = n1; - uint32_t nBits; - uint32_t mu; - memcpy(n11, n, (uint32_t)128U * sizeof (uint32_t)); - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n); - precompr2(nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res; - res.len = (uint32_t)128U; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - )); - buf[0U] = res; - return buf; - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_Bignum4096_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. -*/ -void Hacl_Bignum4096_32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be a 8192-bit bignum, i.e. uint32_t[256]. - The outparam res is meant to be a 4096-bit bignum, i.e. uint32_t[128]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. -*/ -void -Hacl_Bignum4096_32_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - bn_slow_precomp(k1.n, k1.mu, k1.r2, a, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum4096_32_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. - The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. 
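-
- (For instance, a hedged sketch that reuses one context k for several
- exponentiations modulo the same n:
-
-   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k = Hacl_Bignum4096_32_mont_ctx_init(n);
-   Hacl_Bignum4096_32_mod_exp_consttime_precomp(k, a, bBits, b, res);
-   Hacl_Bignum4096_32_mont_ctx_free(k);)
-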
When in doubt, the number of bits for the bignum size is always a safe
- default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
- This function is constant-time over its argument b, at the cost of a slower
- execution time than mod_exp_vartime_*.
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • b < pow2 bBits
- • a < n
-*/
-void
-Hacl_Bignum4096_32_mod_exp_consttime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-)
-{
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
-  exp_consttime_precomp(k1.n, k1.mu, k1.r2, a, bBits, b, res);
-}
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
- The argument a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
- The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init.
-
- Before calling this function, the caller will need to ensure that the following
- preconditions are observed.
- • n is a prime
- • 0 < a
- • a < n
-*/
-void
-Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t *res
-)
-{
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
-  uint32_t n2[128U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
-  uint32_t c2 = c0;
-  uint32_t c1;
-  uint32_t c;
-  {
-    uint32_t i;
-    for (i = (uint32_t)0U; i < (uint32_t)31U; i++)
-    {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, (uint32_t)0U, res_i0);
-      {
-        uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t10, (uint32_t)0U, res_i1);
-        {
-          uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-          uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-          c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t11, (uint32_t)0U, res_i2);
-          {
-            uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-            uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-            c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t12, (uint32_t)0U, res_i);
-          }
-        }
-      }
-    }
-  }
-  KRML_MAYBE_FOR3(i,
-    (uint32_t)124U,
-    (uint32_t)127U,
-    (uint32_t)1U,
-    uint32_t t1 = a1[i];
-    uint32_t *res_i = res1 + i;
-    c2 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c2, t1, (uint32_t)0U, res_i););
-  c1 = c2;
-  c = c1;
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
-}
-
-
-/********************/
-/* Loads and stores */
-/********************/
-
-
-/**
-Load a big-endian bignum from memory.
-
- The argument b points to len bytes of valid memory.
- The function returns a heap-allocated bignum of size sufficient to hold the
- result of loading b, or NULL if either the allocation failed, or the amount of
- required memory would exceed 4GB.
-
- If the return value is non-null, clients must eventually call free(3) on it to
- avoid memory leaks.
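-
- For instance (a hedged sketch; raw is a hypothetical pointer to 512
- big-endian bytes, out a hypothetical output buffer):
-
-   uint8_t out[512U] = { 0U };
-   uint32_t *bn = Hacl_Bignum4096_32_new_bn_from_bytes_be((uint32_t)512U, raw);
-   if (bn != NULL)
-   {
-     Hacl_Bignum4096_32_bn_to_bytes_be(bn, out);
-     free(bn);
-   }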
-*/ -uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U); - { - uint32_t - *res = - (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U, - sizeof (uint32_t)); - if (res == NULL) - { - return res; - } - { - uint32_t *res1 = res; - uint32_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - uint32_t *os = res2; - uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U); - uint32_t x = u; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Load a little-endian bignum from memory. - - The argument b points to len bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U); - { - uint32_t - *res = - (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U, - sizeof (uint32_t)); - if (res == NULL) - { - return res; - } - { - uint32_t *res1 = res; - uint32_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)4U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++) - { - uint32_t *os = res2; - uint8_t *bj = tmp + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r1 = u; - uint32_t x = r1; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Serialize a bignum into big-endian memory. - - The argument b points to a 4096-bit bignum. - The outparam res points to 512 bytes of valid memory. -*/ -void Hacl_Bignum4096_32_bn_to_bytes_be(uint32_t *b, uint8_t *res) -{ - uint8_t tmp[512U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - store32_be(res + i * (uint32_t)4U, b[(uint32_t)128U - i - (uint32_t)1U]); - } - } -} - -/** -Serialize a bignum into little-endian memory. - - The argument b points to a 4096-bit bignum. - The outparam res points to 512 bytes of valid memory. -*/ -void Hacl_Bignum4096_32_bn_to_bytes_le(uint32_t *b, uint8_t *res) -{ - uint8_t tmp[512U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)128U; i++) - { - store32_le(res + i * (uint32_t)4U, b[i]); - } - } -} - - -/***************/ -/* Comparisons */ -/***************/ - - -/** -Returns 2^32 - 1 if a < b, otherwise returns 0. 
-
- The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128].
-*/
-uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b)
-{
-  uint32_t acc = (uint32_t)0U;
-  {
-    uint32_t i;
-    for (i = (uint32_t)0U; i < (uint32_t)128U; i++)
-    {
-      uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
-      uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
-    }
-  }
-  return acc;
-}
-
-/**
-Returns 2^32 - 1 if a = b, otherwise returns 0.
-
- The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128].
-*/
-uint32_t Hacl_Bignum4096_32_eq_mask(uint32_t *a, uint32_t *b)
-{
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  uint32_t mask1;
-  {
-    uint32_t i;
-    for (i = (uint32_t)0U; i < (uint32_t)128U; i++)
-    {
-      uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
-      mask = uu____0 & mask;
-    }
-  }
-  mask1 = mask;
-  return mask1;
-}
-
diff --git a/dist/c89-compatible/Hacl_Bignum4096_32.h b/dist/c89-compatible/Hacl_Bignum4096_32.h
deleted file mode 100644
index cbd47ea320..0000000000
--- a/dist/c89-compatible/Hacl_Bignum4096_32.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Bignum4096_32_H
-#define __Hacl_Bignum4096_32_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "Hacl_Bignum_Base.h"
-#include "Hacl_Bignum.h"
-#include "evercrypt_targetconfig.h"
-/*******************************************************************************
-
-A verified 4096-bit bignum library.
-
-This is a 32-bit optimized version, where bignums are represented as an array
-of 128 unsigned 32-bit integers, i.e. uint32_t[128]. Furthermore, the
-limbs are stored in little-endian format, i.e. the least significant limb is at
-index 0. Each limb is stored in native format in memory. Example:
-
-  uint32_t sixteen[128] = { 0x10 }
-
-  (relying on the fact that when an initializer-list is provided, the remainder
-  of the object gets initialized as if it had static storage duration, i.e. with
-  zeroes)
-
-We strongly encourage users to go through the conversion functions, e.g.
-bn_from_bytes_be, to i) not depend on internal representation choices and ii)
-have the ability to switch easily to a 64-bit optimized version in the future.
-
-*******************************************************************************/
-
-/************************/
-/* Arithmetic functions */
-/************************/
-
-
-/**
-Write `a + b mod 2^4096` in `res`.
-
-  This function returns the carry.
-
-  The arguments a, b and res are meant to be 4096-bit bignums, i.e. uint32_t[128]
-*/
-uint32_t Hacl_Bignum4096_32_add(uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a - b mod 2^4096` in `res`.
-
-  This function returns the carry.
-
-  The arguments a, b and res are meant to be 4096-bit bignums, i.e. uint32_t[128]
-*/
-uint32_t Hacl_Bignum4096_32_sub(uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `(a + b) mod n` in `res`.
-
-  The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • a < n
-  • b < n
-*/
-void Hacl_Bignum4096_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `(a - b) mod n` in `res`.
-
-  The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • a < n
-  • b < n
-*/
-void Hacl_Bignum4096_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a * b` in `res`.
-
-  The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128].
-  The outparam res is meant to be an 8192-bit bignum, i.e. uint32_t[256].
-*/
-void Hacl_Bignum4096_32_mul(uint32_t *a, uint32_t *b, uint32_t *res);
-
-/**
-Write `a * a` in `res`.
-
-  The argument a is meant to be a 4096-bit bignum, i.e. uint32_t[128].
-  The outparam res is meant to be an 8192-bit bignum, i.e. uint32_t[256].
-*/
-void Hacl_Bignum4096_32_sqr(uint32_t *a, uint32_t *res);
-
-/**
-Write `a mod n` in `res`.
-
-  The argument a is meant to be an 8192-bit bignum, i.e. uint32_t[256].
-  The argument n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-
-  The function returns false if any of the following preconditions are violated,
-  true otherwise.
-  • 1 < n
-  • n % 2 = 1
-*/
-bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res);
-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
-  The function is *NOT* constant-time on the argument b. See the
-  mod_exp_consttime_* functions for constant-time variants.
-
-  The function returns false if any of the following preconditions are violated,
-  true otherwise.
-  • n % 2 = 1
-  • 1 < n
-  • b < pow2 bBits
-  • a < n
-*/
-bool
-Hacl_Bignum4096_32_mod_exp_vartime(
-  uint32_t *n,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
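-
-/*
- * A minimal usage sketch for mod_exp_vartime (illustrative only, not part of
- * the original header). It computes res = a ^ 65537 mod n, where n, a and
- * res are caller-supplied uint32_t[128] arrays:
- *
- *   uint32_t e[1U] = { (uint32_t)0x10001U };
- *   bool ok = Hacl_Bignum4096_32_mod_exp_vartime(n, a, (uint32_t)17U, e, res);
- *
- * Here bBits = 17 because 0x10001 = 65537 fits in 17 bits; a smaller bound
- * would violate the precondition b < pow2 bBits.
- */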
-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
-  This function is constant-time over its argument b, at the cost of a slower
-  execution time than mod_exp_vartime.
-
-  The function returns false if any of the following preconditions are violated,
-  true otherwise.
-  • n % 2 = 1
-  • 1 < n
-  • b < pow2 bBits
-  • a < n
-*/
-bool
-Hacl_Bignum4096_32_mod_exp_consttime(
-  uint32_t *n,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
-  The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • n is a prime
-
-  The function returns false if any of the following preconditions are violated, true otherwise.
-  • n % 2 = 1
-  • 1 < n
-  • 0 < a
-  • a < n
-*/
-bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res);
-
-
-/**********************************************/
-/* Arithmetic functions with precomputations. */
-/**********************************************/
-
-
-/**
-Heap-allocate and initialize a montgomery context.
-
-  The argument n is meant to be a 4096-bit bignum, i.e. uint32_t[128].
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • n % 2 = 1
-  • 1 < n
-
-  The caller will need to call Hacl_Bignum4096_32_mont_ctx_free on the return value
-  to avoid memory leaks.
-*/
-Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum4096_32_mont_ctx_init(uint32_t *n);
-
-/**
-Deallocate the memory previously allocated by Hacl_Bignum4096_32_mont_ctx_init.
-
-  The argument k is a montgomery context obtained through Hacl_Bignum4096_32_mont_ctx_init.
-*/
-void Hacl_Bignum4096_32_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k);
-
-/**
-Write `a mod n` in `res`.
-
-  The argument a is meant to be an 8192-bit bignum, i.e. uint32_t[256].
-  The outparam res is meant to be a 4096-bit bignum, i.e. uint32_t[128].
-  The argument k is a montgomery context obtained through Hacl_Bignum4096_32_mont_ctx_init.
-*/
-void
-Hacl_Bignum4096_32_mod_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t *res
-);
-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-  The argument k is a montgomery context obtained through Hacl_Bignum4096_32_mont_ctx_init.
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
-  The function is *NOT* constant-time on the argument b. See the
-  mod_exp_consttime_* functions for constant-time variants.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • b < pow2 bBits
-  • a < n
-*/
-void
-Hacl_Bignum4096_32_mod_exp_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-  The argument k is a montgomery context obtained through Hacl_Bignum4096_32_mont_ctx_init.
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
-  This function is constant-time over its argument b, at the cost of a slower
-  execution time than mod_exp_vartime_*.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • b < pow2 bBits
-  • a < n
-*/
-void
-Hacl_Bignum4096_32_mod_exp_consttime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t bBits,
-  uint32_t *b,
-  uint32_t *res
-);
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
-  The argument a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128].
-  The argument k is a montgomery context obtained through Hacl_Bignum4096_32_mont_ctx_init.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • n is a prime
-  • 0 < a
-  • a < n
-*/
-void
-Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k,
-  uint32_t *a,
-  uint32_t *res
-);
-
-
-/********************/
-/* Loads and stores */
-/********************/
-
-
-/**
-Load a big-endian bignum from memory.
-
-  The argument b points to len bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
-
-  If the return value is non-null, clients must eventually call free(3) on it to
-  avoid memory leaks.
-*/
-uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b);
-
-/**
-Load a little-endian bignum from memory.
-
-  The argument b points to len bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
-
-  If the return value is non-null, clients must eventually call free(3) on it to
-  avoid memory leaks.
-*/
-uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b);
-
-/**
-Serialize a bignum into big-endian memory.
-
-  The argument b points to a 4096-bit bignum.
-  The outparam res points to 512 bytes of valid memory.
-*/
-void Hacl_Bignum4096_32_bn_to_bytes_be(uint32_t *b, uint8_t *res);
-
-/**
-Serialize a bignum into little-endian memory.
-
-  The argument b points to a 4096-bit bignum.
-  The outparam res points to 512 bytes of valid memory.
-*/
-void Hacl_Bignum4096_32_bn_to_bytes_le(uint32_t *b, uint8_t *res);
-
-
-/***************/
-/* Comparisons */
-/***************/
-
-
-/**
-Returns 2^32 - 1 if a < b, otherwise returns 0.
-
-  The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128].
-*/
-uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b);
-
-/**
-Returns 2^32 - 1 if a = b, otherwise returns 0.
-
-  The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128].
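-
-  For example (an illustrative sketch, not from the original header; x and y
-  are caller-initialized limb arrays):
-
-    uint32_t x[128U] = { (uint32_t)0x10U };
-    uint32_t y[128U] = { (uint32_t)0x10U };
-    bool eq = Hacl_Bignum4096_32_eq_mask(x, y) == (uint32_t)0xFFFFFFFFU;
-    bool lt = Hacl_Bignum4096_32_lt_mask(x, y) == (uint32_t)0xFFFFFFFFU;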
-*/ -uint32_t Hacl_Bignum4096_32_eq_mask(uint32_t *a, uint32_t *b); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum4096_32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum64.c b/dist/c89-compatible/Hacl_Bignum64.c deleted file mode 100644 index 73f2804662..0000000000 --- a/dist/c89-compatible/Hacl_Bignum64.c +++ /dev/null @@ -1,1059 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Bignum64.h" - -#include "internal/Hacl_Bignum.h" - -/******************************************************************************* - -A verified bignum library. - -This is a 64-bit optimized version, where bignums are represented as an array -of `len` unsigned 64-bit integers, i.e. uint64_t[len]. - -*******************************************************************************/ - -/************************/ -/* Arithmetic functions */ -/************************/ - - -/** -Write `a + b mod 2 ^ (64 * len)` in `res`. - - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len] -*/ -uint64_t Hacl_Bignum64_add(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res) -{ - return Hacl_Bignum_Addition_bn_add_eq_len_u64(len, a, b, res); -} - -/** -Write `a - b mod 2 ^ (64 * len)` in `res`. - - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len] -*/ -uint64_t Hacl_Bignum64_sub(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res) -{ - return Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, a, b, res); -} - -/** -Write `(a + b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum64_add_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - Hacl_Bignum_bn_add_mod_n_u64(len, n, a, b, res); -} - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • a < n - • b < n -*/ -void Hacl_Bignum64_sub_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - Hacl_Bignum_bn_sub_mod_n_u64(len, n, a, b, res); -} - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. -*/ -void Hacl_Bignum64_mul(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, b, tmp, res); - } -} - -/** -Write `a * a` in `res`. - - The argument a is meant to be `len` limbs in size, i.e. uint64_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. -*/ -void Hacl_Bignum64_sqr(uint32_t len, uint64_t *a, uint64_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, a, tmp, res); - } -} - -static inline void -bn_slow_precomp( - uint32_t len, - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint64_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t a_mod[len]; - memset(a_mod, 0U, len * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t a1[len + len]; - memset(a1, 0U, (len + len) * sizeof (uint64_t)); - memcpy(a1, a, (len + len) * sizeof (uint64_t)); - { - uint64_t c00 = (uint64_t)0U; - uint64_t c0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < len; i0++) - { - uint64_t qj = mu * a1[i0]; - uint64_t *res_j0 = a1 + i0; - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint64_t a_i = n[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0); - { - uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1); - { - uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2); - { - uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint64_t a_i = n[i]; - uint64_t *res_i = res_j0 + i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i); - } - } - { - uint64_t r = c; - uint64_t c1 = r; - uint64_t *resb = a1 + len + i0; - uint64_t res_j = a1[len + i0]; - c00 = Lib_IntTypes_Intrinsics_add_carry_u64(c00, c1, res_j, resb); - } - } - } - memcpy(a_mod, a1 + len, (len + len - len) * sizeof (uint64_t)); - c0 = c00; - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t tmp0[len]; - memset(tmp0, 0U, len * sizeof (uint64_t)); - { - uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, a_mod, n, tmp0); - uint64_t m = (uint64_t)0U - c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t *os = a_mod; - uint64_t x = (m & tmp0[i]) | (~m & a_mod[i]); - os[i] = x; - } - } - KRML_CHECK_SIZE(sizeof (uint64_t), len + len); - { - uint64_t c[len + len]; - 
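-          /* At this point a_mod holds a * R^(-1) mod n, where R = 2^(64*len):
-             the word-by-word loop above performed one pass of Montgomery
-             reduction, and the masked subtraction folded the result below n.
-             Multiplying a_mod by r2 = R^2 mod n and reducing once more
-             (below) therefore yields a mod n. */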
memset(c, 0U, (len + len) * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len); - { - uint64_t tmp[(uint32_t)4U * len]; - memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t)); - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a_mod, r2, tmp, c); - Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, res); - } - } - } - } - } - } - } -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. - The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t one[len]; - memset(one, 0U, len * sizeof (uint64_t)); - { - uint64_t bit0; - uint64_t m0; - memset(one, 0U, len * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m0 = (uint64_t)0U - bit0; - { - uint64_t acc = (uint64_t)0U; - uint64_t m1; - uint64_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m1 = acc; - is_valid_m = m0 & m1; - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t r2[len]; - memset(r2, 0U, len * sizeof (uint64_t)); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r2); - { - uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - bn_slow_precomp(len, n, mu, r2, a, res); - } - } - } - else - { - memset(res, 0U, len * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - } - } -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum64_mod_exp_vartime( - uint32_t len, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b); - uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, len * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. 
- - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum64_mod_exp_consttime( - uint32_t len, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b); - uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64(len, nBits, n, a, bBits, b, res); - } - else - { - memset(res, 0U, len * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t one[len]; - memset(one, 0U, len * sizeof (uint64_t)); - { - uint64_t bit0; - uint64_t m00; - memset(one, 0U, len * sizeof (uint64_t)); - one[0U] = (uint64_t)1U; - bit0 = n[0U] & (uint64_t)1U; - m00 = (uint64_t)0U - bit0; - { - uint64_t acc0 = (uint64_t)0U; - uint64_t m10; - uint64_t m0; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]); - acc0 = - (beq & acc0) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m10 = acc0; - m0 = m00 & m10; - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t bn_zero[len]; - memset(bn_zero, 0U, len * sizeof (uint64_t)); - { - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - uint64_t res10; - uint64_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - res10 = mask1; - m1 = res10; - { - uint64_t acc = (uint64_t)0U; - uint64_t m2; - uint64_t is_valid_m; - uint32_t nBits; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m2 = acc; - is_valid_m = (m0 & ~m1) & m2; - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n); - if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t n2[len]; - memset(n2, 0U, len * sizeof (uint64_t)); - { - uint64_t - c0 = - Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, - n[0U], - (uint64_t)2U, - n2); - uint64_t c1; - 
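-                    /* n2 = n - 2, computed limb by limb: the subtraction of 2
-                       from the low limb happened just above; the branch below
-                       propagates the borrow through the remaining limbs. The
-                       inverse is then obtained as a ^ (n - 2) mod n, by
-                       Fermat's little theorem (n is required to be prime). */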
if ((uint32_t)1U < len) - { - uint64_t *a1 = n + (uint32_t)1U; - uint64_t *res1 = n2 + (uint32_t)1U; - uint64_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++) - { - uint64_t t1 = a1[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0); - { - uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1); - { - uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = - Lib_IntTypes_Intrinsics_sub_borrow_u64(c, - t11, - (uint64_t)0U, - res_i2); - { - uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = - Lib_IntTypes_Intrinsics_sub_borrow_u64(c, - t12, - (uint64_t)0U, - res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < len - (uint32_t)1U; - i++) - { - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i); - } - } - { - uint64_t c10 = c; - c1 = c10; - } - } - else - { - c1 = c0; - } - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, - nBits, - n, - a, - (uint32_t)64U * len, - n2, - res); - } - } - } - else - { - memset(res, 0U, len * sizeof (uint64_t)); - } - return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - } - } - } - } - } -} - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum64_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 -*Hacl_Bignum64_mont_ctx_init(uint32_t len, uint64_t *n) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(len, sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(len, sizeof (uint64_t)); - uint64_t *r21 = r2; - uint64_t *n11 = n1; - uint32_t nBits; - uint64_t mu; - memcpy(n11, n, len * sizeof (uint64_t)); - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res; - res.len = len; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - )); - buf[0U] = res; - return buf; - } - } - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_Bignum64_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. 
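-
-  Typical lifecycle (an illustrative sketch, not from the original sources;
-  n, a, b, bBits and res are caller-supplied, with n an odd modulus of `len`
-  limbs):
-
-    Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-    *k = Hacl_Bignum64_mont_ctx_init(len, n);
-    Hacl_Bignum64_mod_exp_consttime_precomp(k, a, bBits, b, res);
-    Hacl_Bignum64_mont_ctx_free(k);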
-*/ -void Hacl_Bignum64_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Write `a mod n` in `res`. - - The argument a is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. -*/ -void -Hacl_Bignum64_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - bn_slow_precomp(len1, k1.n, k1.mu, k1.r2, a, res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum64_mod_exp_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); -} - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime_*. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • b < pow2 bBits - • a < n -*/ -void -Hacl_Bignum64_mod_exp_consttime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(len1, - k1.n, - k1.mu, - k1.r2, - a, - bBits, - b, - res); -} - -/** -Write `a ^ (-1) mod n` in `res`. - - The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. 
- - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < a - • a < n -*/ -void -Hacl_Bignum64_mod_inv_prime_vartime_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k10 = *k; - uint32_t len1 = k10.len; - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t n2[len1]; - memset(n2, 0U, len1 * sizeof (uint64_t)); - { - uint64_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2); - uint64_t c1; - if ((uint32_t)1U < len1) - { - uint64_t *a1 = k1.n + (uint32_t)1U; - uint64_t *res1 = n2 + (uint32_t)1U; - uint64_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++) - { - uint64_t t1 = a1[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0); - { - uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1); - { - uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2); - { - uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < len1 - (uint32_t)1U; - i++) - { - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i); - } - } - { - uint64_t c10 = c; - c1 = c10; - } - } - else - { - c1 = c0; - } - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1, - k1.n, - k1.mu, - k1.r2, - a, - (uint32_t)64U * len1, - n2, - res); - } - } -} - - -/********************/ -/* Loads and stores */ -/********************/ - - -/** -Load a bid-endian bignum from memory. - - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. 
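-
-  For example, loading a 32-byte big-endian value into a 4-limb bignum and
-  writing it back out (an illustrative sketch; buf and out are caller-supplied
-  32-byte buffers):
-
-    uint64_t *bn = Hacl_Bignum64_new_bn_from_bytes_be((uint32_t)32U, buf);
-    if (bn != NULL)
-    {
-      Hacl_Bignum64_bn_to_bytes_be((uint32_t)32U, bn, out);
-      free(bn);
-    }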
-*/ -uint64_t *Hacl_Bignum64_new_bn_from_bytes_be(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U); - { - uint64_t - *res = - (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U, - sizeof (uint64_t)); - if (res == NULL) - { - return res; - } - { - uint64_t *res1 = res; - uint64_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - uint64_t *os = res2; - uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U); - uint64_t x = u; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Load a little-endian bignum from memory. - - The argument b points to `len` bytes of valid memory. - The function returns a heap-allocated bignum of size sufficient to hold the - result of loading b, or NULL if either the allocation failed, or the amount of - required memory would exceed 4GB. - - If the return value is non-null, clients must eventually call free(3) on it to - avoid memory leaks. -*/ -uint64_t *Hacl_Bignum64_new_bn_from_bytes_le(uint32_t len, uint8_t *b) -{ - if - ( - len - == (uint32_t)0U - || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U) - ) - { - return NULL; - } - KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U); - { - uint64_t - *res = - (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U, - sizeof (uint64_t)); - if (res == NULL) - { - return res; - } - { - uint64_t *res1 = res; - uint64_t *res2 = res1; - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++) - { - uint64_t *os = res2; - uint8_t *bj = tmp + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r1 = u; - uint64_t x = r1; - os[i] = x; - } - } - return res2; - } - } - } -} - -/** -Serialize a bignum into big-endian memory. - - The argument b points to a bignum of ⌈len / 8⌉ size. - The outparam res points to `len` bytes of valid memory. -*/ -void Hacl_Bignum64_bn_to_bytes_be(uint32_t len, uint64_t *b, uint8_t *res) -{ - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]); - } - } - memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t)); - } -} - -/** -Serialize a bignum into little-endian memory. - - The argument b points to a bignum of ⌈len / 8⌉ size. - The outparam res points to `len` bytes of valid memory. 
-*/ -void Hacl_Bignum64_bn_to_bytes_le(uint32_t len, uint64_t *b, uint8_t *res) -{ - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - store64_le(tmp + i * (uint32_t)8U, b[i]); - } - } - memcpy(res, tmp, len * sizeof (uint8_t)); - } -} - - -/***************/ -/* Comparisons */ -/***************/ - - -/** -Returns 2^64 - 1 if a < b, otherwise returns 0. - - The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len]. -*/ -uint64_t Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b) -{ - uint64_t acc = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]); - acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - return acc; -} - -/** -Returns 2^64 - 1 if a = b, otherwise returns 0. - - The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len]. -*/ -uint64_t Hacl_Bignum64_eq_mask(uint32_t len, uint64_t *a, uint64_t *b) -{ - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - return mask1; -} - diff --git a/dist/c89-compatible/Hacl_Bignum64.h b/dist/c89-compatible/Hacl_Bignum64.h deleted file mode 100644 index 4ccc9775a8..0000000000 --- a/dist/c89-compatible/Hacl_Bignum64.h +++ /dev/null @@ -1,399 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum64_H -#define __Hacl_Bignum64_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Bignum_Base.h" -#include "Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -typedef Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum64_pbn_mont_ctx_u64; - -/******************************************************************************* - -A verified bignum library. 
- -This is a 64-bit optimized version, where bignums are represented as an array -of `len` unsigned 64-bit integers, i.e. uint64_t[len]. - -*******************************************************************************/ - -/************************/ -/* Arithmetic functions */ -/************************/ - - -/** -Write `a + b mod 2 ^ (64 * len)` in `res`. - - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len] -*/ -uint64_t Hacl_Bignum64_add(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `a - b mod 2 ^ (64 * len)` in `res`. - - This functions returns the carry. - - The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len] -*/ -uint64_t Hacl_Bignum64_sub(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `(a + b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum64_add_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `(a - b) mod n` in `res`. - - The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • a < n - • b < n -*/ -void Hacl_Bignum64_sub_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `a * b` in `res`. - - The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. -*/ -void Hacl_Bignum64_mul(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res); - -/** -Write `a * a` in `res`. - - The argument a is meant to be `len` limbs in size, i.e. uint64_t[len]. - The outparam res is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. -*/ -void Hacl_Bignum64_sqr(uint32_t len, uint64_t *a, uint64_t *res); - -/** -Write `a mod n` in `res`. - - The argument a is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. - The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • 1 < n - • n % 2 = 1 -*/ -bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum64_mod_exp_vartime( - uint32_t len, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ b mod n` in `res`. 
- - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - This function is constant-time over its argument b, at the cost of a slower - execution time than mod_exp_vartime. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • b < pow2 bBits - • a < n -*/ -bool -Hacl_Bignum64_mod_exp_consttime( - uint32_t len, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -/** -Write `a ^ (-1) mod n` in `res`. - - The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n - • 0 < a - • a < n -*/ -bool -Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res); - - -/**********************************************/ -/* Arithmetic functions with precomputations. */ -/**********************************************/ - - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_Bignum64_mont_ctx_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 -*Hacl_Bignum64_mont_ctx_init(uint32_t len, uint64_t *n); - -/** -Deallocate the memory previously allocated by Hacl_Bignum64_mont_ctx_init. - - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. -*/ -void Hacl_Bignum64_mont_ctx_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k); - -/** -Write `a mod n` in `res`. - - The argument a is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. - The outparam res is meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. -*/ -void -Hacl_Bignum64_mod_precomp( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *res -); - -/** -Write `a ^ b mod n` in `res`. - - The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 4096-bit bignum, bBits should be 4096. - - The function is *NOT* constant-time on the argument b. See the - mod_exp_consttime_* functions for constant-time variants. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
-  • b < pow2 bBits
-  • a < n
-*/
-void
-Hacl_Bignum64_mod_exp_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k,
-  uint64_t *a,
-  uint32_t bBits,
-  uint64_t *b,
-  uint64_t *res
-);
-
-/**
-Write `a ^ b mod n` in `res`.
-
-  The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len].
-  The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init.
-
-  The argument b is a bignum of any size, and bBits is an upper bound on the
-  number of significant bits of b. A tighter bound results in faster execution
-  time. When in doubt, the number of bits for the bignum size is always a safe
-  default, e.g. if b is a 4096-bit bignum, bBits should be 4096.
-
-  This function is constant-time over its argument b, at the cost of a slower
-  execution time than mod_exp_vartime_*.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • b < pow2 bBits
-  • a < n
-*/
-void
-Hacl_Bignum64_mod_exp_consttime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k,
-  uint64_t *a,
-  uint32_t bBits,
-  uint64_t *b,
-  uint64_t *res
-);
-
-/**
-Write `a ^ (-1) mod n` in `res`.
-
-  The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len].
-  The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init.
-
-  Before calling this function, the caller will need to ensure that the following
-  preconditions are observed.
-  • n is a prime
-  • 0 < a
-  • a < n
-*/
-void
-Hacl_Bignum64_mod_inv_prime_vartime_precomp(
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k,
-  uint64_t *a,
-  uint64_t *res
-);
-
-
-/********************/
-/* Loads and stores */
-/********************/
-
-
-/**
-Load a big-endian bignum from memory.
-
-  The argument b points to `len` bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
-
-  If the return value is non-null, clients must eventually call free(3) on it to
-  avoid memory leaks.
-*/
-uint64_t *Hacl_Bignum64_new_bn_from_bytes_be(uint32_t len, uint8_t *b);
-
-/**
-Load a little-endian bignum from memory.
-
-  The argument b points to `len` bytes of valid memory.
-  The function returns a heap-allocated bignum of size sufficient to hold the
-  result of loading b, or NULL if either the allocation failed, or the amount of
-  required memory would exceed 4GB.
-
-  If the return value is non-null, clients must eventually call free(3) on it to
-  avoid memory leaks.
-*/
-uint64_t *Hacl_Bignum64_new_bn_from_bytes_le(uint32_t len, uint8_t *b);
-
-/**
-Serialize a bignum into big-endian memory.
-
-  The argument b points to a bignum of ⌈len / 8⌉ limbs in size.
-  The outparam res points to `len` bytes of valid memory.
-*/
-void Hacl_Bignum64_bn_to_bytes_be(uint32_t len, uint64_t *b, uint8_t *res);
-
-/**
-Serialize a bignum into little-endian memory.
-
-  The argument b points to a bignum of ⌈len / 8⌉ limbs in size.
-  The outparam res points to `len` bytes of valid memory.
-*/
-void Hacl_Bignum64_bn_to_bytes_le(uint32_t len, uint64_t *b, uint8_t *res);
-
-
-/***************/
-/* Comparisons */
-/***************/
-
-
-/**
-Returns 2^64 - 1 if a < b, otherwise returns 0.
-
-  The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len].
-*/
-uint64_t Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b);
-
-/**
-Returns 2^64 - 1 if a = b, otherwise returns 0.
- - The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len]. -*/ -uint64_t Hacl_Bignum64_eq_mask(uint32_t len, uint64_t *a, uint64_t *b); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum64_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum_Base.h b/dist/c89-compatible/Hacl_Bignum_Base.h deleted file mode 100644 index 21513611e3..0000000000 --- a/dist/c89-compatible/Hacl_Bignum_Base.h +++ /dev/null @@ -1,589 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum_Base_H -#define __Hacl_Bignum_Base_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -#include "lib_intrinsics.h" -static inline void -Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *res) -{ - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - uint64_t *os = res; - uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U); - uint64_t x = u; - os[i] = x; - } - } - } -} - -static inline void -Hacl_Bignum_Convert_bn_to_bytes_be_uint64(uint32_t len, uint64_t *b, uint8_t *res) -{ - uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t tmpLen = (uint32_t)8U * bnLen; - KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen); - { - uint8_t tmp[tmpLen]; - memset(tmp, 0U, tmpLen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < bnLen; i++) - { - store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]); - } - } - memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t)); - } -} - -static inline uint64_t -Hacl_Bignum_Base_mul_wide_add_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64_t *out) -{ - FStar_UInt128_uint128 - res = FStar_UInt128_add(FStar_UInt128_mul_wide(a, b), FStar_UInt128_uint64_to_uint128(c_in)); - out[0U] = FStar_UInt128_uint128_to_uint64(res); - return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U)); -} - -static inline uint32_t 
-Hacl_Bignum_Base_mul_wide_add2_u32(uint32_t a, uint32_t b, uint32_t c_in, uint32_t *out) -{ - uint32_t out0 = out[0U]; - uint64_t res = (uint64_t)a * (uint64_t)b + (uint64_t)c_in + (uint64_t)out0; - out[0U] = (uint32_t)res; - return (uint32_t)(res >> (uint32_t)32U); -} - -static inline uint64_t -Hacl_Bignum_Base_mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64_t *out) -{ - uint64_t out0 = out[0U]; - FStar_UInt128_uint128 - res = - FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(a, b), - FStar_UInt128_uint64_to_uint128(c_in)), - FStar_UInt128_uint64_to_uint128(out0)); - out[0U] = FStar_UInt128_uint128_to_uint64(res); - return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U)); -} - -static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32_t *b) -{ - uint32_t priv = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint32_t mask = FStar_UInt32_eq_mask(b[i], (uint32_t)0U); - priv = (mask & priv) | (~mask & i); - } - } - return priv; -} - -static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64_t *b) -{ - uint64_t priv = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint64_t mask = FStar_UInt64_eq_mask(b[i], (uint64_t)0U); - priv = (mask & priv) | (~mask & (uint64_t)i); - } - } - return priv; -} - -static inline uint32_t -Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++) - { - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0); - { - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1); - { - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2); - { - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++) - { - uint32_t t1 = a[i]; - uint32_t t2 = b[i]; - uint32_t *res_i = res + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t2, res_i); - } - } - return c; -} - -static inline uint64_t -Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++) - { - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + 
(uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++) - { - uint64_t t1 = a[i]; - uint64_t t2 = b[i]; - uint64_t *res_i = res + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t2, res_i); - } - } - return c; -} - -static inline uint32_t -Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res) -{ - uint32_t c = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++) - { - uint32_t t1 = a[(uint32_t)4U * i]; - uint32_t t20 = b[(uint32_t)4U * i]; - uint32_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0); - { - uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1); - { - uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2); - { - uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++) - { - uint32_t t1 = a[i]; - uint32_t t2 = b[i]; - uint32_t *res_i = res + i; - c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t2, res_i); - } - } - return c; -} - -static inline uint64_t -Hacl_Bignum_Addition_bn_add_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++) - { - uint64_t t1 = a[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++) - { - uint64_t t1 = a[i]; - uint64_t t2 = b[i]; - uint64_t *res_i = res + i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i); - } - } - return c; -} - -static inline void -Hacl_Bignum_Multiplication_bn_mul_u32( - uint32_t aLen, - uint32_t *a, - uint32_t bLen, - uint32_t *b, - uint32_t *res -) -{ - uint32_t i; - memset(res, 0U, (aLen + 
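/* The bn_sub/bn_add routines above propagate a one-bit borrow/carry through
 * equal-length limb arrays, unrolled four limbs per iteration with a scalar
 * tail. A portable sketch of the add-with-carry step they delegate to the
 * Lib_IntTypes_Intrinsics layer (illustrative equivalent only; the library
 * uses compiler builtins where available):
 *
 *   static uint64_t add_carry_u64(uint64_t c_in, uint64_t a, uint64_t b,
 *                                 uint64_t *out)
 *   {
 *     uint64_t t = a + b;
 *     uint64_t c = (uint64_t)(t < a);   // carry out of a + b
 *     uint64_t r = t + c_in;            // c_in is 0 or 1
 *     c += (uint64_t)(r < t);           // carry out of adding c_in
 *     *out = r;
 *     return c;                         // total carry is 0 or 1
 *   }
 */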
bLen) * sizeof (uint32_t)); - for (i = (uint32_t)0U; i < bLen; i++) - { - uint32_t bj = b[i]; - uint32_t *res_j = res + i; - uint32_t c = (uint32_t)0U; - uint32_t r; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < aLen / (uint32_t)4U; i0++) - { - uint32_t a_i = a[(uint32_t)4U * i0]; - uint32_t *res_i0 = res_j + (uint32_t)4U * i0; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0); - { - uint32_t a_i0 = a[(uint32_t)4U * i0 + (uint32_t)1U]; - uint32_t *res_i1 = res_j + (uint32_t)4U * i0 + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1); - { - uint32_t a_i1 = a[(uint32_t)4U * i0 + (uint32_t)2U]; - uint32_t *res_i2 = res_j + (uint32_t)4U * i0 + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2); - { - uint32_t a_i2 = a[(uint32_t)4U * i0 + (uint32_t)3U]; - uint32_t *res_i = res_j + (uint32_t)4U * i0 + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i); - } - } - } - } - } - { - uint32_t i0; - for (i0 = aLen / (uint32_t)4U * (uint32_t)4U; i0 < aLen; i0++) - { - uint32_t a_i = a[i0]; - uint32_t *res_i = res_j + i0; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i); - } - } - r = c; - res[aLen + i] = r; - } -} - -static inline void -Hacl_Bignum_Multiplication_bn_mul_u64( - uint32_t aLen, - uint64_t *a, - uint32_t bLen, - uint64_t *b, - uint64_t *res -) -{ - uint32_t i; - memset(res, 0U, (aLen + bLen) * sizeof (uint64_t)); - for (i = (uint32_t)0U; i < bLen; i++) - { - uint64_t bj = b[i]; - uint64_t *res_j = res + i; - uint64_t c = (uint64_t)0U; - uint64_t r; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < aLen / (uint32_t)4U; i0++) - { - uint64_t a_i = a[(uint32_t)4U * i0]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i0; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); - { - uint64_t a_i0 = a[(uint32_t)4U * i0 + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i0 + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); - { - uint64_t a_i1 = a[(uint32_t)4U * i0 + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i0 + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); - { - uint64_t a_i2 = a[(uint32_t)4U * i0 + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i0 + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); - } - } - } - } - } - { - uint32_t i0; - for (i0 = aLen / (uint32_t)4U * (uint32_t)4U; i0 < aLen; i0++) - { - uint64_t a_i = a[i0]; - uint64_t *res_i = res_j + i0; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i); - } - } - r = c; - res[aLen + i] = r; - } -} - -static inline void -Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res) -{ - uint32_t c0; - memset(res, 0U, (aLen + aLen) * sizeof (uint32_t)); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < aLen; i0++) - { - uint32_t *ab = a; - uint32_t a_j = a[i0]; - uint32_t *res_j = res + i0; - uint32_t c = (uint32_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint32_t a_i = ab[(uint32_t)4U * i]; - uint32_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0); - { - uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1); - { - uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, 
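/* bn_mul above is plain schoolbook multiplication: with limb base R,
 * A*B = sum_i b[i] * A * R^i, so each outer iteration multiply-accumulates
 * a[0..aLen) by b[i] into the window res + i and stores the final carry at
 * res[aLen + i]; the destination must hold aLen + bLen limbs. A tiny
 * illustrative call (hypothetical caller):
 *
 *   uint32_t a[2] = { 0xFFFFFFFFU, 0x1U };   // A = 0x1FFFFFFFF
 *   uint32_t b[2] = { 0x2U, 0x0U };          // B = 2
 *   uint32_t res[4];
 *   Hacl_Bignum_Multiplication_bn_mul_u32(2U, a, 2U, b, res);
 *   // A * 2 = 0x3FFFFFFFE, so res == { 0xFFFFFFFE, 0x3, 0x0, 0x0 }
 */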
c, res_i2); - { - uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint32_t a_i = ab[i]; - uint32_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i); - } - } - { - uint32_t r = c; - res[i0 + i0] = r; - } - } - } - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, res, res); - KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen); - { - uint32_t tmp[aLen + aLen]; - memset(tmp, 0U, (aLen + aLen) * sizeof (uint32_t)); - { - uint32_t c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen; i++) - { - uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i]; - uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U); - uint32_t lo = (uint32_t)res1; - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi; - } - } - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, tmp, res); - } - } -} - -static inline void -Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res) -{ - uint64_t c0; - memset(res, 0U, (aLen + aLen) * sizeof (uint64_t)); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < aLen; i0++) - { - uint64_t *ab = a; - uint64_t a_j = a[i0]; - uint64_t *res_j = res + i0; - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint64_t a_i = ab[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0); - { - uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1); - { - uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2); - { - uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint64_t a_i = ab[i]; - uint64_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i); - } - } - { - uint64_t r = c; - res[i0 + i0] = r; - } - } - } - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, res, res); - KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen); - { - uint64_t tmp[aLen + aLen]; - memset(tmp, 0U, (aLen + aLen) * sizeof (uint64_t)); - { - uint64_t c1; - { - uint32_t i; - for (i = (uint32_t)0U; i < aLen; i++) - { - FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]); - uint64_t - hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U)); - uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi; - } - } - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, tmp, res); - } - } -} - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum_Base_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Bignum_K256.h b/dist/c89-compatible/Hacl_Bignum_K256.h deleted file mode 100644 index 07f30151dd..0000000000 --- a/dist/c89-compatible/Hacl_Bignum_K256.h +++ /dev/null @@ -1,717 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - 
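Note on the bn_sqr functions above: squaring halves the work of a general
multiply via the identity A^2 = sum_i a[i]^2 * R^(2i)
+ 2 * sum_{i<j} a[i]*a[j] * R^(i+j), where R is the limb base. The main loop
accumulates each cross product a[i]*a[j] with i < j exactly once, the
self-addition bn_add_eq_len(res, res, res) doubles that sum, and a final
pass adds the diagonal squares staged in tmp.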
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Bignum_K256_H -#define __Hacl_Bignum_K256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -static inline bool Hacl_K256_Field_is_felem_zero_vartime(uint64_t *f) -{ - uint64_t f0 = f[0U]; - uint64_t f1 = f[1U]; - uint64_t f2 = f[2U]; - uint64_t f3 = f[3U]; - uint64_t f4 = f[4U]; - return - f0 - == (uint64_t)0U - && f1 == (uint64_t)0U - && f2 == (uint64_t)0U - && f3 == (uint64_t)0U - && f4 == (uint64_t)0U; -} - -static inline bool Hacl_K256_Field_is_felem_eq_vartime(uint64_t *f1, uint64_t *f2) -{ - uint64_t a0 = f1[0U]; - uint64_t a1 = f1[1U]; - uint64_t a2 = f1[2U]; - uint64_t a3 = f1[3U]; - uint64_t a4 = f1[4U]; - uint64_t b0 = f2[0U]; - uint64_t b1 = f2[1U]; - uint64_t b2 = f2[2U]; - uint64_t b3 = f2[3U]; - uint64_t b4 = f2[4U]; - return a0 == b0 && a1 == b1 && a2 == b2 && a3 == b3 && a4 == b4; -} - -static inline bool Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(uint64_t *f) -{ - uint64_t f0 = f[0U]; - uint64_t f1 = f[1U]; - uint64_t f2 = f[2U]; - uint64_t f3 = f[3U]; - uint64_t f4 = f[4U]; - if (f4 > (uint64_t)0U) - { - return false; - } - if (f3 > (uint64_t)0U) - { - return false; - } - if (f2 < (uint64_t)0x1455123U) - { - return true; - } - if (f2 > (uint64_t)0x1455123U) - { - return false; - } - if (f1 < (uint64_t)0x1950b75fc4402U) - { - return true; - } - if (f1 > (uint64_t)0x1950b75fc4402U) - { - return false; - } - return f0 < (uint64_t)0xda1722fc9baeeU; -} - -static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b) -{ - uint64_t tmp[4U] = { 0U }; - uint64_t s0; - uint64_t s1; - uint64_t s2; - uint64_t s3; - uint64_t f00; - uint64_t f10; - uint64_t f20; - uint64_t f30; - uint64_t f40; - uint64_t f0; - uint64_t f1; - uint64_t f2; - uint64_t f3; - uint64_t f4; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = tmp; - uint8_t *bj = b + i * (uint32_t)8U; - uint64_t u = load64_be(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - s0 = tmp[3U]; - s1 = tmp[2U]; - s2 = tmp[1U]; - s3 = tmp[0U]; - f00 = s0 & (uint64_t)0xfffffffffffffU; - f10 = s0 >> (uint32_t)52U | (s1 & (uint64_t)0xffffffffffU) << (uint32_t)12U; - f20 = s1 >> (uint32_t)40U | (s2 & (uint64_t)0xfffffffU) << (uint32_t)24U; - f30 = s2 >> (uint32_t)28U | (s3 & (uint64_t)0xffffU) << 
(uint32_t)36U; - f40 = s3 >> (uint32_t)16U; - f0 = f00; - f1 = f10; - f2 = f20; - f3 = f30; - f4 = f40; - f[0U] = f0; - f[1U] = f1; - f[2U] = f2; - f[3U] = f3; - f[4U] = f4; -} - -static inline bool Hacl_K256_Field_load_felem_vartime(uint64_t *f, uint8_t *b) -{ - uint64_t f0; - uint64_t f1; - uint64_t f2; - uint64_t f3; - uint64_t f4; - bool is_ge_p; - bool ite; - Hacl_K256_Field_load_felem(f, b); - f0 = f[0U]; - f1 = f[1U]; - f2 = f[2U]; - f3 = f[3U]; - f4 = f[4U]; - is_ge_p = - f0 - >= (uint64_t)0xffffefffffc2fU - && f1 == (uint64_t)0xfffffffffffffU - && f2 == (uint64_t)0xfffffffffffffU - && f3 == (uint64_t)0xfffffffffffffU - && f4 == (uint64_t)0xffffffffffffU; - if (is_ge_p) - { - ite = false; - } - else - { - ite = !Hacl_K256_Field_is_felem_zero_vartime(f); - } - return ite; -} - -static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f) -{ - uint64_t tmp[4U] = { 0U }; - uint64_t f00 = f[0U]; - uint64_t f10 = f[1U]; - uint64_t f20 = f[2U]; - uint64_t f30 = f[3U]; - uint64_t f4 = f[4U]; - uint64_t o0 = f00 | f10 << (uint32_t)52U; - uint64_t o1 = f10 >> (uint32_t)12U | f20 << (uint32_t)40U; - uint64_t o2 = f20 >> (uint32_t)24U | f30 << (uint32_t)28U; - uint64_t o3 = f30 >> (uint32_t)36U | f4 << (uint32_t)16U; - uint64_t f0 = o0; - uint64_t f1 = o1; - uint64_t f2 = o2; - uint64_t f3 = o3; - tmp[0U] = f3; - tmp[1U] = f2; - tmp[2U] = f1; - tmp[3U] = f0; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_be(b + i * (uint32_t)8U, tmp[i]);); -} - -static inline void Hacl_K256_Field_fmul_small_num(uint64_t *out, uint64_t *f, uint64_t num) -{ - uint64_t f00 = f[0U]; - uint64_t f10 = f[1U]; - uint64_t f20 = f[2U]; - uint64_t f30 = f[3U]; - uint64_t f40 = f[4U]; - uint64_t o0 = f00 * num; - uint64_t o1 = f10 * num; - uint64_t o2 = f20 * num; - uint64_t o3 = f30 * num; - uint64_t o4 = f40 * num; - uint64_t f0 = o0; - uint64_t f1 = o1; - uint64_t f2 = o2; - uint64_t f3 = o3; - uint64_t f4 = o4; - out[0U] = f0; - out[1U] = f1; - out[2U] = f2; - out[3U] = f3; - out[4U] = f4; -} - -static inline void Hacl_K256_Field_fadd(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t a0 = f1[0U]; - uint64_t a1 = f1[1U]; - uint64_t a2 = f1[2U]; - uint64_t a3 = f1[3U]; - uint64_t a4 = f1[4U]; - uint64_t b0 = f2[0U]; - uint64_t b1 = f2[1U]; - uint64_t b2 = f2[2U]; - uint64_t b3 = f2[3U]; - uint64_t b4 = f2[4U]; - uint64_t o0 = a0 + b0; - uint64_t o1 = a1 + b1; - uint64_t o2 = a2 + b2; - uint64_t o3 = a3 + b3; - uint64_t o4 = a4 + b4; - uint64_t f0 = o0; - uint64_t f11 = o1; - uint64_t f21 = o2; - uint64_t f3 = o3; - uint64_t f4 = o4; - out[0U] = f0; - out[1U] = f11; - out[2U] = f21; - out[3U] = f3; - out[4U] = f4; -} - -static inline void Hacl_K256_Field_fsub(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t x) -{ - uint64_t a0 = f1[0U]; - uint64_t a1 = f1[1U]; - uint64_t a2 = f1[2U]; - uint64_t a3 = f1[3U]; - uint64_t a4 = f1[4U]; - uint64_t b0 = f2[0U]; - uint64_t b1 = f2[1U]; - uint64_t b2 = f2[2U]; - uint64_t b3 = f2[3U]; - uint64_t b4 = f2[4U]; - uint64_t r00 = (uint64_t)9007190664804446U * x - b0; - uint64_t r10 = (uint64_t)9007199254740990U * x - b1; - uint64_t r20 = (uint64_t)9007199254740990U * x - b2; - uint64_t r30 = (uint64_t)9007199254740990U * x - b3; - uint64_t r40 = (uint64_t)562949953421310U * x - b4; - uint64_t r0 = r00; - uint64_t r1 = r10; - uint64_t r2 = r20; - uint64_t r3 = r30; - uint64_t r4 = r40; - uint64_t o0 = a0 + r0; - uint64_t o1 = a1 + r1; - uint64_t o2 = a2 + r2; - uint64_t o3 = a3 + r3; - uint64_t o4 = a4 + r4; - uint64_t f0 = o0; - 
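/* The K256 field code above uses an unsaturated radix-2^52 representation
 * for p = 2^256 - 0x1000003D1: four 52-bit limbs plus a 48-bit top limb
 * (4*52 + 48 = 256), leaving headroom in each 64-bit word so limbs can grow
 * before carries must be propagated. load_felem's repacking of the four
 * big-endian words (s0 = least significant), spelled out:
 *
 *   f0 = s0 & (2^52 - 1);                       // low 52 bits
 *   f1 = s0 >> 52 | (s1 & (2^40 - 1)) << 12;    // 12 + 40 bits
 *   f2 = s1 >> 40 | (s2 & (2^28 - 1)) << 24;    // 24 + 28 bits
 *   f3 = s2 >> 28 | (s3 & (2^16 - 1)) << 36;    // 36 + 16 bits
 *   f4 = s3 >> 16;                              // top 48 bits
 */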
uint64_t f11 = o1; - uint64_t f21 = o2; - uint64_t f3 = o3; - uint64_t f4 = o4; - out[0U] = f0; - out[1U] = f11; - out[2U] = f21; - out[3U] = f3; - out[4U] = f4; -} - -static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t a0 = f1[0U]; - uint64_t a1 = f1[1U]; - uint64_t a2 = f1[2U]; - uint64_t a3 = f1[3U]; - uint64_t a4 = f1[4U]; - uint64_t b0 = f2[0U]; - uint64_t b1 = f2[1U]; - uint64_t b2 = f2[2U]; - uint64_t b3 = f2[3U]; - uint64_t b4 = f2[4U]; - uint64_t r = (uint64_t)0x1000003D10U; - FStar_UInt128_uint128 - d0 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0, - b3), - FStar_UInt128_mul_wide(a1, b2)), - FStar_UInt128_mul_wide(a2, b1)), - FStar_UInt128_mul_wide(a3, b0)); - FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, b4); - FStar_UInt128_uint128 - d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0))); - uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U)); - uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U); - FStar_UInt128_uint128 - d3 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2, - FStar_UInt128_mul_wide(a0, b4)), - FStar_UInt128_mul_wide(a1, b3)), - FStar_UInt128_mul_wide(a2, b2)), - FStar_UInt128_mul_wide(a3, b1)), - FStar_UInt128_mul_wide(a4, b0)); - FStar_UInt128_uint128 - d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1)); - uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U); - uint64_t tx = t4 >> (uint32_t)48U; - uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU; - FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, b0); - FStar_UInt128_uint128 - d6 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d5, - FStar_UInt128_mul_wide(a1, b4)), - FStar_UInt128_mul_wide(a2, b3)), - FStar_UInt128_mul_wide(a3, b2)), - FStar_UInt128_mul_wide(a4, b1)); - uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U); - uint64_t u0_ = tx | u0 << (uint32_t)4U; - FStar_UInt128_uint128 - c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U)); - uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U); - FStar_UInt128_uint128 - c5 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a0, b1)), - FStar_UInt128_mul_wide(a1, b0)); - FStar_UInt128_uint128 - d8 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d7, - FStar_UInt128_mul_wide(a2, b4)), - FStar_UInt128_mul_wide(a3, b3)), - FStar_UInt128_mul_wide(a4, b2)); - FStar_UInt128_uint128 - c6 = - FStar_UInt128_add_mod(c5, - FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r)); - FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U); - uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U); - FStar_UInt128_uint128 - c8 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7, - FStar_UInt128_mul_wide(a0, b2)), - 
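/* fsub above avoids per-limb borrows by adding a multiple of the modulus
 * first: 9007190664804446 = 2*(2^52 - 0x1000003D1), 9007199254740990 =
 * 2*(2^52 - 1) and 562949953421310 = 2*(2^48 - 1) are exactly the limbs of
 * 2*p in this radix, so
 *
 *   out = f1 + (x*2p - f2) == f1 - f2  (mod p)
 *
 * with every limb of x*2p - f2 nonnegative whenever f2's limbs are bounded
 * accordingly; the caller picks x from f2's known magnitude. The same
 * constants reappear with x = 1 in fnegate_conditional_vartime further
 * down. */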
FStar_UInt128_mul_wide(a1, b1)), - FStar_UInt128_mul_wide(a2, b0)); - FStar_UInt128_uint128 - d10 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(d9, FStar_UInt128_mul_wide(a3, b4)), - FStar_UInt128_mul_wide(a4, b3)); - FStar_UInt128_uint128 - c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10))); - uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U)); - uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U); - FStar_UInt128_uint128 - c11 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, - FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)), - FStar_UInt128_uint64_to_uint128(t3)); - uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU; - uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U)); - uint64_t r4 = c12 + t4_; - uint64_t f0 = r0; - uint64_t f11 = r1; - uint64_t f21 = r2; - uint64_t f3 = r3; - uint64_t f4 = r4; - out[0U] = f0; - out[1U] = f11; - out[2U] = f21; - out[3U] = f3; - out[4U] = f4; -} - -static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f) -{ - uint64_t a0 = f[0U]; - uint64_t a1 = f[1U]; - uint64_t a2 = f[2U]; - uint64_t a3 = f[3U]; - uint64_t a4 = f[4U]; - uint64_t r = (uint64_t)0x1000003D10U; - FStar_UInt128_uint128 - d0 = - FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * (uint64_t)2U, a3), - FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a2)); - FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, a4); - FStar_UInt128_uint128 - d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0))); - uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U)); - uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U); - uint64_t a41 = a4 * (uint64_t)2U; - FStar_UInt128_uint128 - d3 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2, - FStar_UInt128_mul_wide(a0, a41)), - FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a3)), - FStar_UInt128_mul_wide(a2, a2)); - FStar_UInt128_uint128 - d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1)); - uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U); - uint64_t tx = t4 >> (uint32_t)48U; - uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU; - FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, a0); - FStar_UInt128_uint128 - d6 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(d5, FStar_UInt128_mul_wide(a1, a41)), - FStar_UInt128_mul_wide(a2 * (uint64_t)2U, a3)); - uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U); - uint64_t u0_ = tx | u0 << (uint32_t)4U; - FStar_UInt128_uint128 - c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U)); - uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U); - uint64_t a01 = a0 * (uint64_t)2U; - FStar_UInt128_uint128 c5 = FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a01, a1)); - FStar_UInt128_uint128 - d8 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(d7, FStar_UInt128_mul_wide(a2, a41)), - FStar_UInt128_mul_wide(a3, a3)); - 
FStar_UInt128_uint128 - c6 = - FStar_UInt128_add_mod(c5, - FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r)); - FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U); - uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U); - FStar_UInt128_uint128 - c8 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7, FStar_UInt128_mul_wide(a01, a2)), - FStar_UInt128_mul_wide(a1, a1)); - FStar_UInt128_uint128 d10 = FStar_UInt128_add_mod(d9, FStar_UInt128_mul_wide(a3, a41)); - FStar_UInt128_uint128 - c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10))); - uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U)); - uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU; - FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U); - FStar_UInt128_uint128 - c11 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, - FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)), - FStar_UInt128_uint64_to_uint128(t3)); - uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU; - uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U)); - uint64_t r4 = c12 + t4_; - uint64_t f0 = r0; - uint64_t f1 = r1; - uint64_t f2 = r2; - uint64_t f3 = r3; - uint64_t f4 = r4; - out[0U] = f0; - out[1U] = f1; - out[2U] = f2; - out[3U] = f3; - out[4U] = f4; -} - -static inline void Hacl_K256_Field_fnormalize_weak(uint64_t *out, uint64_t *f) -{ - uint64_t t0 = f[0U]; - uint64_t t1 = f[1U]; - uint64_t t2 = f[2U]; - uint64_t t3 = f[3U]; - uint64_t t4 = f[4U]; - uint64_t x0 = t4 >> (uint32_t)48U; - uint64_t t410 = t4 & (uint64_t)0xffffffffffffU; - uint64_t x = x0; - uint64_t t01 = t0; - uint64_t t11 = t1; - uint64_t t21 = t2; - uint64_t t31 = t3; - uint64_t t41 = t410; - uint64_t t02 = t01 + x * (uint64_t)0x1000003D1U; - uint64_t t12 = t11 + (t02 >> (uint32_t)52U); - uint64_t t03 = t02 & (uint64_t)0xfffffffffffffU; - uint64_t t22 = t21 + (t12 >> (uint32_t)52U); - uint64_t t13 = t12 & (uint64_t)0xfffffffffffffU; - uint64_t t32 = t31 + (t22 >> (uint32_t)52U); - uint64_t t23 = t22 & (uint64_t)0xfffffffffffffU; - uint64_t t42 = t41 + (t32 >> (uint32_t)52U); - uint64_t t33 = t32 & (uint64_t)0xfffffffffffffU; - uint64_t f0 = t03; - uint64_t f1 = t13; - uint64_t f2 = t23; - uint64_t f3 = t33; - uint64_t f4 = t42; - out[0U] = f0; - out[1U] = f1; - out[2U] = f2; - out[3U] = f3; - out[4U] = f4; -} - -static inline void Hacl_K256_Field_fnormalize(uint64_t *out, uint64_t *f) -{ - uint64_t f00 = f[0U]; - uint64_t f10 = f[1U]; - uint64_t f20 = f[2U]; - uint64_t f30 = f[3U]; - uint64_t f40 = f[4U]; - uint64_t x0 = f40 >> (uint32_t)48U; - uint64_t t40 = f40 & (uint64_t)0xffffffffffffU; - uint64_t x1 = x0; - uint64_t t00 = f00; - uint64_t t10 = f10; - uint64_t t20 = f20; - uint64_t t30 = f30; - uint64_t t42 = t40; - uint64_t t01 = t00 + x1 * (uint64_t)0x1000003D1U; - uint64_t t110 = t10 + (t01 >> (uint32_t)52U); - uint64_t t020 = t01 & (uint64_t)0xfffffffffffffU; - uint64_t t210 = t20 + (t110 >> (uint32_t)52U); - uint64_t t120 = t110 & (uint64_t)0xfffffffffffffU; - uint64_t t310 = t30 + (t210 >> (uint32_t)52U); - uint64_t t220 = t210 & (uint64_t)0xfffffffffffffU; - uint64_t t410 = t42 + (t310 >> (uint32_t)52U); - uint64_t t320 = t310 & (uint64_t)0xfffffffffffffU; - uint64_t t0 = t020; - uint64_t t1 = t120; - uint64_t t2 = t220; 
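/* In fmul/fsqr above, the constant r = 0x1000003D10 is 0x1000003D1 << 4:
 * the prime gives 2^256 == 0x1000003D1 (mod p), and because partial
 * products sit at multiples of 2^52, the first column to fold back has
 * weight 2^260 = 2^4 * 2^256 == 0x1000003D10. The r << 12 variant
 * (= 0x1000003D1 << 16) folds the carry parts extracted 64 bits higher up.
 * Each FStar_UInt128 chain is one column of the 5x5 limb product, reduced
 * on the fly rather than materializing a 10-limb intermediate. */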
- uint64_t t3 = t320; - uint64_t t4 = t410; - uint64_t x2 = t4 >> (uint32_t)48U; - uint64_t t411 = t4 & (uint64_t)0xffffffffffffU; - uint64_t x = x2; - uint64_t r0 = t0; - uint64_t r1 = t1; - uint64_t r2 = t2; - uint64_t r3 = t3; - uint64_t r4 = t411; - uint64_t m4 = FStar_UInt64_eq_mask(r4, (uint64_t)0xffffffffffffU); - uint64_t m3 = FStar_UInt64_eq_mask(r3, (uint64_t)0xfffffffffffffU); - uint64_t m2 = FStar_UInt64_eq_mask(r2, (uint64_t)0xfffffffffffffU); - uint64_t m1 = FStar_UInt64_eq_mask(r1, (uint64_t)0xfffffffffffffU); - uint64_t m0 = FStar_UInt64_gte_mask(r0, (uint64_t)0xffffefffffc2fU); - uint64_t is_ge_p_m = (((m0 & m1) & m2) & m3) & m4; - uint64_t m_to_one = is_ge_p_m & (uint64_t)1U; - uint64_t x10 = m_to_one | x; - uint64_t t010 = r0 + x10 * (uint64_t)0x1000003D1U; - uint64_t t11 = r1 + (t010 >> (uint32_t)52U); - uint64_t t02 = t010 & (uint64_t)0xfffffffffffffU; - uint64_t t21 = r2 + (t11 >> (uint32_t)52U); - uint64_t t12 = t11 & (uint64_t)0xfffffffffffffU; - uint64_t t31 = r3 + (t21 >> (uint32_t)52U); - uint64_t t22 = t21 & (uint64_t)0xfffffffffffffU; - uint64_t t41 = r4 + (t31 >> (uint32_t)52U); - uint64_t t32 = t31 & (uint64_t)0xfffffffffffffU; - uint64_t s0 = t02; - uint64_t s1 = t12; - uint64_t s2 = t22; - uint64_t s3 = t32; - uint64_t s4 = t41; - uint64_t t412 = s4 & (uint64_t)0xffffffffffffU; - uint64_t k0 = s0; - uint64_t k1 = s1; - uint64_t k2 = s2; - uint64_t k3 = s3; - uint64_t k4 = t412; - uint64_t f0 = k0; - uint64_t f1 = k1; - uint64_t f2 = k2; - uint64_t f3 = k3; - uint64_t f4 = k4; - out[0U] = f0; - out[1U] = f1; - out[2U] = f2; - out[3U] = f3; - out[4U] = f4; -} - -static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool is_negate) -{ - if (is_negate) - { - uint64_t a0 = f[0U]; - uint64_t a1 = f[1U]; - uint64_t a2 = f[2U]; - uint64_t a3 = f[3U]; - uint64_t a4 = f[4U]; - uint64_t r0 = (uint64_t)9007190664804446U - a0; - uint64_t r1 = (uint64_t)9007199254740990U - a1; - uint64_t r2 = (uint64_t)9007199254740990U - a2; - uint64_t r3 = (uint64_t)9007199254740990U - a3; - uint64_t r4 = (uint64_t)562949953421310U - a4; - uint64_t f0 = r0; - uint64_t f1 = r1; - uint64_t f2 = r2; - uint64_t f3 = r3; - uint64_t f4 = r4; - f[0U] = f0; - f[1U] = f1; - f[2U] = f2; - f[3U] = f3; - f[4U] = f4; - Hacl_K256_Field_fnormalize(f, f); - return; - } -} - -static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uint32_t b) -{ - uint32_t i; - for (i = (uint32_t)0U; i < b; i++) - { - Hacl_K256_Field_fsqr(out, out); - } -} - -static inline void Hacl_Impl_K256_Finv_fsquare_times(uint64_t *out, uint64_t *a, uint32_t b) -{ - uint32_t i; - memcpy(out, a, (uint32_t)5U * sizeof (uint64_t)); - for (i = (uint32_t)0U; i < b; i++) - { - Hacl_K256_Field_fsqr(out, out); - } -} - -static inline void Hacl_Impl_K256_Finv_fexp_223_23(uint64_t *out, uint64_t *x2, uint64_t *f) -{ - uint64_t x3[5U] = { 0U }; - uint64_t x22[5U] = { 0U }; - uint64_t x44[5U] = { 0U }; - uint64_t x88[5U] = { 0U }; - Hacl_Impl_K256_Finv_fsquare_times(x2, f, (uint32_t)1U); - Hacl_K256_Field_fmul(x2, x2, f); - Hacl_Impl_K256_Finv_fsquare_times(x3, x2, (uint32_t)1U); - Hacl_K256_Field_fmul(x3, x3, f); - Hacl_Impl_K256_Finv_fsquare_times(out, x3, (uint32_t)3U); - Hacl_K256_Field_fmul(out, out, x3); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U); - Hacl_K256_Field_fmul(out, out, x3); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U); - Hacl_K256_Field_fmul(out, out, x2); - Hacl_Impl_K256_Finv_fsquare_times(x22, out, (uint32_t)11U); - 
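/* fnormalize above reaches the canonical representative without branching
 * on secrets: after a weak-normalization carry pass, the eq_mask/gte_mask
 * calls build an all-ones mask exactly when the limbs encode a value >= p,
 * the mask is squeezed to a 0/1 flag, and one more pass adds
 * flag * 0x1000003D1 while truncating the top limb to 48 bits -- a
 * constant-time conditional subtraction of p. */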
Hacl_K256_Field_fmul(x22, x22, out); - Hacl_Impl_K256_Finv_fsquare_times(x44, x22, (uint32_t)22U); - Hacl_K256_Field_fmul(x44, x44, x22); - Hacl_Impl_K256_Finv_fsquare_times(x88, x44, (uint32_t)44U); - Hacl_K256_Field_fmul(x88, x88, x44); - Hacl_Impl_K256_Finv_fsquare_times(out, x88, (uint32_t)88U); - Hacl_K256_Field_fmul(out, out, x88); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)44U); - Hacl_K256_Field_fmul(out, out, x44); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U); - Hacl_K256_Field_fmul(out, out, x3); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)23U); - Hacl_K256_Field_fmul(out, out, x22); -} - -static inline void Hacl_Impl_K256_Finv_finv(uint64_t *out, uint64_t *f) -{ - uint64_t x2[5U] = { 0U }; - Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)5U); - Hacl_K256_Field_fmul(out, out, f); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U); - Hacl_K256_Field_fmul(out, out, x2); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U); - Hacl_K256_Field_fmul(out, out, f); -} - -static inline void Hacl_Impl_K256_Finv_fsqrt(uint64_t *out, uint64_t *f) -{ - uint64_t x2[5U] = { 0U }; - Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)6U); - Hacl_K256_Field_fmul(out, out, x2); - Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U); -} - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Bignum_K256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Chacha20.c b/dist/c89-compatible/Hacl_Chacha20.c deleted file mode 100644 index c6b5562c48..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20.c +++ /dev/null @@ -1,256 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
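Note on the inversion code above: finv and fsqrt are Fermat exponentiations
sharing one addition chain. finv computes f^(p-2), the inverse by Fermat's
little theorem; fsqrt computes f^((p+1)/4), a square root whenever f is a
quadratic residue, which works because p == 3 (mod 4). Each xN temporary
holds f^(2^N - 1) -- a run of N one-bits in the exponent -- built by
square-N-times-then-multiply steps in fexp_223_23.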
- */ - - -#include "internal/Hacl_Chacha20.h" - - - -const -uint32_t -Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] = - { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U }; - -static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d) -{ - uint32_t sta0 = st[a]; - uint32_t stb0 = st[b]; - uint32_t std0 = st[d]; - uint32_t sta10 = sta0 + stb0; - uint32_t std10 = std0 ^ sta10; - uint32_t std20 = std10 << (uint32_t)16U | std10 >> (uint32_t)16U; - uint32_t sta2; - uint32_t stb1; - uint32_t std3; - uint32_t sta11; - uint32_t std11; - uint32_t std21; - uint32_t sta3; - uint32_t stb2; - uint32_t std4; - uint32_t sta12; - uint32_t std12; - uint32_t std22; - uint32_t sta; - uint32_t stb; - uint32_t std; - uint32_t sta1; - uint32_t std1; - uint32_t std2; - st[a] = sta10; - st[d] = std20; - sta2 = st[c]; - stb1 = st[d]; - std3 = st[b]; - sta11 = sta2 + stb1; - std11 = std3 ^ sta11; - std21 = std11 << (uint32_t)12U | std11 >> (uint32_t)20U; - st[c] = sta11; - st[b] = std21; - sta3 = st[a]; - stb2 = st[b]; - std4 = st[d]; - sta12 = sta3 + stb2; - std12 = std4 ^ sta12; - std22 = std12 << (uint32_t)8U | std12 >> (uint32_t)24U; - st[a] = sta12; - st[d] = std22; - sta = st[c]; - stb = st[d]; - std = st[b]; - sta1 = sta + stb; - std1 = std ^ sta1; - std2 = std1 << (uint32_t)7U | std1 >> (uint32_t)25U; - st[c] = sta1; - st[b] = std2; -} - -static inline void double_round(uint32_t *st) -{ - quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U); - quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U); - quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U); - quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U); - quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U); - quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U); - quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U); - quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U); -} - -static inline void rounds(uint32_t *st) -{ - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); -} - -static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr) -{ - uint32_t ctr_u32; - memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t)); - ctr_u32 = ctr; - k[12U] = k[12U] + ctr_u32; - rounds(k); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = k; - uint32_t x = k[i] + ctx[i]; - os[i] = x;); - k[12U] = k[12U] + ctr_u32; -} - -static const -uint32_t -chacha20_constants[4U] = - { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U }; - -void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr) -{ - uint32_t i; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = ctx; - uint32_t x = chacha20_constants[i0]; - os[i0] = x;); - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = ctx + (uint32_t)4U; - uint8_t *bj = k + i0 * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i0] = x;); - ctx[12U] = ctr; - for (i = (uint32_t)0U; i < (uint32_t)3U; i++) - { - uint32_t *os = ctx + (uint32_t)13U; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t 
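/* quarter_round above is the standard ChaCha ARX step with rotation counts
 * 16, 12, 8, 7, flattened into explicit temporaries for C89. A compact
 * equivalent (sketch, not the generated style):
 *
 *   #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
 *   #define QR(a, b, c, d)               \
 *     a += b; d ^= a; d = ROTL32(d, 16); \
 *     c += d; b ^= c; b = ROTL32(b, 12); \
 *     a += b; d ^= a; d = ROTL32(d, 8);  \
 *     c += d; b ^= c; b = ROTL32(b, 7)
 *
 * double_round applies QR to the four columns and then the four diagonals;
 * rounds runs ten double rounds, the "20" in ChaCha20. */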
r = u; - uint32_t x = r; - os[i] = x; - } -} - -static void chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr, uint8_t *text) -{ - uint32_t k[16U] = { 0U }; - chacha20_core(k, ctx, incr); - { - uint32_t bl[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint8_t *bj = text + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint32_t x = bl[i] ^ k[i]; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - store32_le(out + i * (uint32_t)4U, bl[i]);); - } -} - -static inline void -chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr, uint8_t *text) -{ - uint8_t plain[64U] = { 0U }; - memcpy(plain, text, len * sizeof (uint8_t)); - chacha20_encrypt_block(ctx, plain, incr, plain); - memcpy(out, plain, len * sizeof (uint8_t)); -} - -void -Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text) -{ - uint32_t rem = len % (uint32_t)64U; - uint32_t nb = len / (uint32_t)64U; - uint32_t rem1 = len % (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U); - } - } - if (rem1 > (uint32_t)0U) - { - chacha20_encrypt_last(ctx, rem, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U); - } -} - -void -Hacl_Chacha20_chacha20_encrypt( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - uint32_t ctx[16U] = { 0U }; - Hacl_Impl_Chacha20_chacha20_init(ctx, key, n, ctr); - Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, text); -} - -void -Hacl_Chacha20_chacha20_decrypt( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - uint32_t ctx[16U] = { 0U }; - Hacl_Impl_Chacha20_chacha20_init(ctx, key, n, ctr); - Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, cipher); -} - diff --git a/dist/c89-compatible/Hacl_Chacha20.h b/dist/c89-compatible/Hacl_Chacha20.h deleted file mode 100644 index 2fb96507f8..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20.h +++ /dev/null @@ -1,65 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
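Note on the ChaCha20 core just deleted: the 16-word state follows RFC 8439
-- words 0-3 hold the "expand 32-byte k" constants, words 4-11 the 256-bit
key, word 12 the 32-bit block counter, words 13-15 the 96-bit nonce.
Encryption is pure counter mode: block i is XORed with the keystream
derived from counter ctr + i, the final partial block is padded through a
64-byte scratch buffer, and chacha20_decrypt is consequently the identical
computation applied to the ciphertext.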
- */
-
-
-#ifndef __Hacl_Chacha20_H
-#define __Hacl_Chacha20_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "evercrypt_targetconfig.h"
-void
-Hacl_Chacha20_chacha20_encrypt(
-  uint32_t len,
-  uint8_t *out,
-  uint8_t *text,
-  uint8_t *key,
-  uint8_t *n,
-  uint32_t ctr
-);
-
-void
-Hacl_Chacha20_chacha20_decrypt(
-  uint32_t len,
-  uint8_t *out,
-  uint8_t *cipher,
-  uint8_t *key,
-  uint8_t *n,
-  uint32_t ctr
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Chacha20_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Chacha20Poly1305_128.c b/dist/c89-compatible/Hacl_Chacha20Poly1305_128.c
deleted file mode 100644
index e40b792b72..0000000000
--- a/dist/c89-compatible/Hacl_Chacha20Poly1305_128.c
+++ /dev/null
@@ -1,1355 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "Hacl_Chacha20Poly1305_128.h" - -#include "internal/Hacl_Poly1305_128.h" -#include "libintvector.h" -static inline void -poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text) -{ - uint32_t n = len / (uint32_t)16U; - uint32_t r = len % (uint32_t)16U; - uint8_t *blocks = text; - uint8_t *rem = text + n * (uint32_t)16U; - Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec128 *acc0 = ctx; - uint32_t sz_block = (uint32_t)32U; - uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block; - uint8_t *t00 = blocks; - uint32_t len1; - uint8_t *t10; - uint32_t nb0; - uint32_t rem1; - if (len0 > (uint32_t)0U) - { - uint32_t bs = (uint32_t)32U; - uint8_t *text0 = t00; - Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc0, text0); - { - uint32_t len10 = len0 - bs; - uint8_t *text1 = t00 + bs; - uint32_t nb = len10 / bs; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *block = text1 + i * bs; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block); - Lib_IntVector_Intrinsics_vec128 - b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 - lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2); - Lib_IntVector_Intrinsics_vec128 - hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2); - Lib_IntVector_Intrinsics_vec128 - f00 = - Lib_IntVector_Intrinsics_vec128_and(lo, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f15 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f25 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f0 = f00; - Lib_IntVector_Intrinsics_vec128 f1 = f15; - Lib_IntVector_Intrinsics_vec128 f2 = f25; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f41 = f40; - e[0U] = f0; - e[1U] = f1; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b); - Lib_IntVector_Intrinsics_vec128 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec128 *rn = pre0 + (uint32_t)10U; - Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + (uint32_t)15U; - Lib_IntVector_Intrinsics_vec128 r0 = rn[0U]; - Lib_IntVector_Intrinsics_vec128 r1 = rn[1U]; - Lib_IntVector_Intrinsics_vec128 r2 = rn[2U]; - Lib_IntVector_Intrinsics_vec128 r3 = rn[3U]; - Lib_IntVector_Intrinsics_vec128 r4 = rn[4U]; - Lib_IntVector_Intrinsics_vec128 r51 = rn5[1U]; - Lib_IntVector_Intrinsics_vec128 r52 = rn5[2U]; - Lib_IntVector_Intrinsics_vec128 r53 = rn5[3U]; - Lib_IntVector_Intrinsics_vec128 r54 = 
rn5[4U]; - Lib_IntVector_Intrinsics_vec128 f10 = acc0[0U]; - Lib_IntVector_Intrinsics_vec128 f110 = acc0[1U]; - Lib_IntVector_Intrinsics_vec128 f120 = acc0[2U]; - Lib_IntVector_Intrinsics_vec128 f130 = acc0[3U]; - Lib_IntVector_Intrinsics_vec128 f140 = acc0[4U]; - Lib_IntVector_Intrinsics_vec128 a0 = Lib_IntVector_Intrinsics_vec128_mul64(r0, f10); - Lib_IntVector_Intrinsics_vec128 a1 = Lib_IntVector_Intrinsics_vec128_mul64(r1, f10); - Lib_IntVector_Intrinsics_vec128 a2 = Lib_IntVector_Intrinsics_vec128_mul64(r2, f10); - Lib_IntVector_Intrinsics_vec128 a3 = Lib_IntVector_Intrinsics_vec128_mul64(r3, f10); - Lib_IntVector_Intrinsics_vec128 a4 = Lib_IntVector_Intrinsics_vec128_mul64(r4, f10); - Lib_IntVector_Intrinsics_vec128 - a01 = - Lib_IntVector_Intrinsics_vec128_add64(a0, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f110)); - Lib_IntVector_Intrinsics_vec128 - a11 = - Lib_IntVector_Intrinsics_vec128_add64(a1, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f110)); - Lib_IntVector_Intrinsics_vec128 - a21 = - Lib_IntVector_Intrinsics_vec128_add64(a2, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f110)); - Lib_IntVector_Intrinsics_vec128 - a31 = - Lib_IntVector_Intrinsics_vec128_add64(a3, - Lib_IntVector_Intrinsics_vec128_mul64(r2, f110)); - Lib_IntVector_Intrinsics_vec128 - a41 = - Lib_IntVector_Intrinsics_vec128_add64(a4, - Lib_IntVector_Intrinsics_vec128_mul64(r3, f110)); - Lib_IntVector_Intrinsics_vec128 - a02 = - Lib_IntVector_Intrinsics_vec128_add64(a01, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f120)); - Lib_IntVector_Intrinsics_vec128 - a12 = - Lib_IntVector_Intrinsics_vec128_add64(a11, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f120)); - Lib_IntVector_Intrinsics_vec128 - a22 = - Lib_IntVector_Intrinsics_vec128_add64(a21, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f120)); - Lib_IntVector_Intrinsics_vec128 - a32 = - Lib_IntVector_Intrinsics_vec128_add64(a31, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f120)); - Lib_IntVector_Intrinsics_vec128 - a42 = - Lib_IntVector_Intrinsics_vec128_add64(a41, - Lib_IntVector_Intrinsics_vec128_mul64(r2, f120)); - Lib_IntVector_Intrinsics_vec128 - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r52, f130)); - Lib_IntVector_Intrinsics_vec128 - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f130)); - Lib_IntVector_Intrinsics_vec128 - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f130)); - Lib_IntVector_Intrinsics_vec128 - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f130)); - Lib_IntVector_Intrinsics_vec128 - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f130)); - Lib_IntVector_Intrinsics_vec128 - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r51, f140)); - Lib_IntVector_Intrinsics_vec128 - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r52, f140)); - Lib_IntVector_Intrinsics_vec128 - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f140)); - Lib_IntVector_Intrinsics_vec128 - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f140)); - Lib_IntVector_Intrinsics_vec128 - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f140)); - Lib_IntVector_Intrinsics_vec128 t01 = a04; - 
Lib_IntVector_Intrinsics_vec128 t1 = a14; - Lib_IntVector_Intrinsics_vec128 t2 = a24; - Lib_IntVector_Intrinsics_vec128 t3 = a34; - Lib_IntVector_Intrinsics_vec128 t4 = a44; - Lib_IntVector_Intrinsics_vec128 - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec128 - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26); - Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0); - Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - Lib_IntVector_Intrinsics_vec128 - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - Lib_IntVector_Intrinsics_vec128 - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - Lib_IntVector_Intrinsics_vec128 - x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - Lib_IntVector_Intrinsics_vec128 - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - Lib_IntVector_Intrinsics_vec128 o00 = x02; - Lib_IntVector_Intrinsics_vec128 o10 = x12; - Lib_IntVector_Intrinsics_vec128 o20 = x21; - Lib_IntVector_Intrinsics_vec128 o30 = x32; - Lib_IntVector_Intrinsics_vec128 o40 = x42; - acc0[0U] = o00; - acc0[1U] = o10; - acc0[2U] = o20; - acc0[3U] = o30; - acc0[4U] = o40; - { - Lib_IntVector_Intrinsics_vec128 f100 = acc0[0U]; - Lib_IntVector_Intrinsics_vec128 f11 = acc0[1U]; - Lib_IntVector_Intrinsics_vec128 f12 = acc0[2U]; - Lib_IntVector_Intrinsics_vec128 f13 = acc0[3U]; - Lib_IntVector_Intrinsics_vec128 f14 = acc0[4U]; - Lib_IntVector_Intrinsics_vec128 f20 = e[0U]; - Lib_IntVector_Intrinsics_vec128 f21 = e[1U]; - Lib_IntVector_Intrinsics_vec128 f22 = e[2U]; - Lib_IntVector_Intrinsics_vec128 f23 = e[3U]; - Lib_IntVector_Intrinsics_vec128 f24 = e[4U]; - Lib_IntVector_Intrinsics_vec128 - o0 = Lib_IntVector_Intrinsics_vec128_add64(f100, f20); - Lib_IntVector_Intrinsics_vec128 - o1 = Lib_IntVector_Intrinsics_vec128_add64(f11, f21); - Lib_IntVector_Intrinsics_vec128 - o2 = Lib_IntVector_Intrinsics_vec128_add64(f12, f22); - 
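/* The code above is two-way vectorized Poly1305: each vec128 lane carries
 * one 16-byte block in radix-2^26 limbs (hence the 0x3ffffff masks), and
 * the rn/rn5 tables (precomputed r^2 and 5*r^2 limbs) let two blocks
 * advance per multiplication -- Horner's rule in r^2, fixed up by
 * fmul_r2_normalize after the loop. ORing 0x1000000 into limb 4 (weight
 * 2^104) sets each block's 2^128 padding bit. In the carry chain,
 * z12 = z11 + (z11 << 2) multiplies by 5, implementing
 * 2^130 == 5 (mod 2^130 - 5). */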
Lib_IntVector_Intrinsics_vec128 - o3 = Lib_IntVector_Intrinsics_vec128_add64(f13, f23); - Lib_IntVector_Intrinsics_vec128 - o4 = Lib_IntVector_Intrinsics_vec128_add64(f14, f24); - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - } - Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc0, pre0); - } - } - len1 = n * (uint32_t)16U - len0; - t10 = blocks + len0; - nb0 = len1 / (uint32_t)16U; - rem1 = len1 % (uint32_t)16U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb0; i++) - { - uint8_t *block = t10 + i * (uint32_t)16U; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo); - Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi); - Lib_IntVector_Intrinsics_vec128 - f010 = - Lib_IntVector_Intrinsics_vec128_and(f0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f110 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f01 = f010; - Lib_IntVector_Intrinsics_vec128 f111 = f110; - Lib_IntVector_Intrinsics_vec128 f2 = f20; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f41 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b); - Lib_IntVector_Intrinsics_vec128 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec128 *r1 = pre0; - Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec128 r0 = r1[0U]; - Lib_IntVector_Intrinsics_vec128 r11 = r1[1U]; - Lib_IntVector_Intrinsics_vec128 r2 = r1[2U]; - Lib_IntVector_Intrinsics_vec128 r3 = r1[3U]; - Lib_IntVector_Intrinsics_vec128 r4 = r1[4U]; - Lib_IntVector_Intrinsics_vec128 r51 = r5[1U]; - Lib_IntVector_Intrinsics_vec128 r52 = r5[2U]; - Lib_IntVector_Intrinsics_vec128 r53 = r5[3U]; - Lib_IntVector_Intrinsics_vec128 r54 = r5[4U]; - Lib_IntVector_Intrinsics_vec128 f10 = e[0U]; - Lib_IntVector_Intrinsics_vec128 f11 = e[1U]; - Lib_IntVector_Intrinsics_vec128 f12 = e[2U]; - Lib_IntVector_Intrinsics_vec128 f13 = e[3U]; - Lib_IntVector_Intrinsics_vec128 f14 = e[4U]; - Lib_IntVector_Intrinsics_vec128 a0 = acc0[0U]; - Lib_IntVector_Intrinsics_vec128 a1 = acc0[1U]; - Lib_IntVector_Intrinsics_vec128 a2 = acc0[2U]; - Lib_IntVector_Intrinsics_vec128 a3 = acc0[3U]; - Lib_IntVector_Intrinsics_vec128 a4 = acc0[4U]; - Lib_IntVector_Intrinsics_vec128 a01 = 
Lib_IntVector_Intrinsics_vec128_add64(a0, f10); - Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11); - Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12); - Lib_IntVector_Intrinsics_vec128 a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13); - Lib_IntVector_Intrinsics_vec128 a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14); - Lib_IntVector_Intrinsics_vec128 a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01); - Lib_IntVector_Intrinsics_vec128 a12 = Lib_IntVector_Intrinsics_vec128_mul64(r11, a01); - Lib_IntVector_Intrinsics_vec128 a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01); - Lib_IntVector_Intrinsics_vec128 a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01); - Lib_IntVector_Intrinsics_vec128 a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01); - Lib_IntVector_Intrinsics_vec128 - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a11)); - Lib_IntVector_Intrinsics_vec128 - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a11)); - Lib_IntVector_Intrinsics_vec128 - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a11)); - Lib_IntVector_Intrinsics_vec128 - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a11)); - Lib_IntVector_Intrinsics_vec128 - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r3, a11)); - Lib_IntVector_Intrinsics_vec128 - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a21)); - Lib_IntVector_Intrinsics_vec128 - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a21)); - Lib_IntVector_Intrinsics_vec128 - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a21)); - Lib_IntVector_Intrinsics_vec128 - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a21)); - Lib_IntVector_Intrinsics_vec128 - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a21)); - Lib_IntVector_Intrinsics_vec128 - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a31)); - Lib_IntVector_Intrinsics_vec128 - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a31)); - Lib_IntVector_Intrinsics_vec128 - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a31)); - Lib_IntVector_Intrinsics_vec128 - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a31)); - Lib_IntVector_Intrinsics_vec128 - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a31)); - Lib_IntVector_Intrinsics_vec128 - a06 = - Lib_IntVector_Intrinsics_vec128_add64(a05, - Lib_IntVector_Intrinsics_vec128_mul64(r51, a41)); - Lib_IntVector_Intrinsics_vec128 - a16 = - Lib_IntVector_Intrinsics_vec128_add64(a15, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a41)); - Lib_IntVector_Intrinsics_vec128 - a26 = - Lib_IntVector_Intrinsics_vec128_add64(a25, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a41)); - Lib_IntVector_Intrinsics_vec128 - a36 = - Lib_IntVector_Intrinsics_vec128_add64(a35, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a41)); - Lib_IntVector_Intrinsics_vec128 - a46 = - 
Lib_IntVector_Intrinsics_vec128_add64(a45, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a41)); - Lib_IntVector_Intrinsics_vec128 t01 = a06; - Lib_IntVector_Intrinsics_vec128 t11 = a16; - Lib_IntVector_Intrinsics_vec128 t2 = a26; - Lib_IntVector_Intrinsics_vec128 t3 = a36; - Lib_IntVector_Intrinsics_vec128 t4 = a46; - Lib_IntVector_Intrinsics_vec128 - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec128 - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26); - Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0); - Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - Lib_IntVector_Intrinsics_vec128 - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - Lib_IntVector_Intrinsics_vec128 - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - Lib_IntVector_Intrinsics_vec128 - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - Lib_IntVector_Intrinsics_vec128 o0 = x02; - Lib_IntVector_Intrinsics_vec128 o1 = x12; - Lib_IntVector_Intrinsics_vec128 o2 = x21; - Lib_IntVector_Intrinsics_vec128 o3 = x32; - Lib_IntVector_Intrinsics_vec128 o4 = x42; - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *last = t10 + nb0 * (uint32_t)16U; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - uint8_t tmp[16U] = { 0U }; - memcpy(tmp, last, rem1 * sizeof (uint8_t)); - { - uint64_t u0 = load64_le(tmp); - uint64_t lo = u0; - uint64_t u = load64_le(tmp + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo); - Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi); - Lib_IntVector_Intrinsics_vec128 - f010 = - Lib_IntVector_Intrinsics_vec128_and(f0, - 
Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f110 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f01 = f010; - Lib_IntVector_Intrinsics_vec128 f111 = f110; - Lib_IntVector_Intrinsics_vec128 f2 = f20; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f4 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f4; - { - uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U; - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b); - Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U]; - e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask); - { - Lib_IntVector_Intrinsics_vec128 *r1 = pre0; - Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec128 r0 = r1[0U]; - Lib_IntVector_Intrinsics_vec128 r11 = r1[1U]; - Lib_IntVector_Intrinsics_vec128 r2 = r1[2U]; - Lib_IntVector_Intrinsics_vec128 r3 = r1[3U]; - Lib_IntVector_Intrinsics_vec128 r4 = r1[4U]; - Lib_IntVector_Intrinsics_vec128 r51 = r5[1U]; - Lib_IntVector_Intrinsics_vec128 r52 = r5[2U]; - Lib_IntVector_Intrinsics_vec128 r53 = r5[3U]; - Lib_IntVector_Intrinsics_vec128 r54 = r5[4U]; - Lib_IntVector_Intrinsics_vec128 f10 = e[0U]; - Lib_IntVector_Intrinsics_vec128 f11 = e[1U]; - Lib_IntVector_Intrinsics_vec128 f12 = e[2U]; - Lib_IntVector_Intrinsics_vec128 f13 = e[3U]; - Lib_IntVector_Intrinsics_vec128 f14 = e[4U]; - Lib_IntVector_Intrinsics_vec128 a0 = acc0[0U]; - Lib_IntVector_Intrinsics_vec128 a1 = acc0[1U]; - Lib_IntVector_Intrinsics_vec128 a2 = acc0[2U]; - Lib_IntVector_Intrinsics_vec128 a3 = acc0[3U]; - Lib_IntVector_Intrinsics_vec128 a4 = acc0[4U]; - Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10); - Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11); - Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12); - Lib_IntVector_Intrinsics_vec128 a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13); - Lib_IntVector_Intrinsics_vec128 a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14); - Lib_IntVector_Intrinsics_vec128 a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01); - Lib_IntVector_Intrinsics_vec128 a12 = Lib_IntVector_Intrinsics_vec128_mul64(r11, a01); - Lib_IntVector_Intrinsics_vec128 a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01); - Lib_IntVector_Intrinsics_vec128 a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01); - Lib_IntVector_Intrinsics_vec128 a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01); - Lib_IntVector_Intrinsics_vec128 - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - 
Lib_IntVector_Intrinsics_vec128_mul64(r54, a11)); - Lib_IntVector_Intrinsics_vec128 - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a11)); - Lib_IntVector_Intrinsics_vec128 - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a11)); - Lib_IntVector_Intrinsics_vec128 - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a11)); - Lib_IntVector_Intrinsics_vec128 - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r3, a11)); - Lib_IntVector_Intrinsics_vec128 - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a21)); - Lib_IntVector_Intrinsics_vec128 - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a21)); - Lib_IntVector_Intrinsics_vec128 - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a21)); - Lib_IntVector_Intrinsics_vec128 - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a21)); - Lib_IntVector_Intrinsics_vec128 - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a21)); - Lib_IntVector_Intrinsics_vec128 - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a31)); - Lib_IntVector_Intrinsics_vec128 - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a31)); - Lib_IntVector_Intrinsics_vec128 - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a31)); - Lib_IntVector_Intrinsics_vec128 - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a31)); - Lib_IntVector_Intrinsics_vec128 - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a31)); - Lib_IntVector_Intrinsics_vec128 - a06 = - Lib_IntVector_Intrinsics_vec128_add64(a05, - Lib_IntVector_Intrinsics_vec128_mul64(r51, a41)); - Lib_IntVector_Intrinsics_vec128 - a16 = - Lib_IntVector_Intrinsics_vec128_add64(a15, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a41)); - Lib_IntVector_Intrinsics_vec128 - a26 = - Lib_IntVector_Intrinsics_vec128_add64(a25, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a41)); - Lib_IntVector_Intrinsics_vec128 - a36 = - Lib_IntVector_Intrinsics_vec128_add64(a35, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a41)); - Lib_IntVector_Intrinsics_vec128 - a46 = - Lib_IntVector_Intrinsics_vec128_add64(a45, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a41)); - Lib_IntVector_Intrinsics_vec128 t01 = a06; - Lib_IntVector_Intrinsics_vec128 t11 = a16; - Lib_IntVector_Intrinsics_vec128 t2 = a26; - Lib_IntVector_Intrinsics_vec128 t3 = a36; - Lib_IntVector_Intrinsics_vec128 t4 = a46; - Lib_IntVector_Intrinsics_vec128 - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec128 - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26); - Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0); - Lib_IntVector_Intrinsics_vec128 x4 = 
Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - Lib_IntVector_Intrinsics_vec128 - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - Lib_IntVector_Intrinsics_vec128 - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - Lib_IntVector_Intrinsics_vec128 - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - Lib_IntVector_Intrinsics_vec128 o0 = x02; - Lib_IntVector_Intrinsics_vec128 o1 = x12; - Lib_IntVector_Intrinsics_vec128 o2 = x21; - Lib_IntVector_Intrinsics_vec128 o3 = x32; - Lib_IntVector_Intrinsics_vec128 o4 = x42; - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - { - uint8_t tmp[16U] = { 0U }; - memcpy(tmp, rem, r * sizeof (uint8_t)); - if (r > (uint32_t)0U) - { - Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec128 *acc = ctx; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - uint64_t u0 = load64_le(tmp); - uint64_t lo = u0; - uint64_t u = load64_le(tmp + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo); - Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi); - Lib_IntVector_Intrinsics_vec128 - f010 = - Lib_IntVector_Intrinsics_vec128_and(f0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f110 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = 
Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f01 = f010; - Lib_IntVector_Intrinsics_vec128 f111 = f110; - Lib_IntVector_Intrinsics_vec128 f2 = f20; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f41 = f40; - uint64_t b; - Lib_IntVector_Intrinsics_vec128 mask; - Lib_IntVector_Intrinsics_vec128 f4; - Lib_IntVector_Intrinsics_vec128 *r1; - Lib_IntVector_Intrinsics_vec128 *r5; - Lib_IntVector_Intrinsics_vec128 r0; - Lib_IntVector_Intrinsics_vec128 r11; - Lib_IntVector_Intrinsics_vec128 r2; - Lib_IntVector_Intrinsics_vec128 r3; - Lib_IntVector_Intrinsics_vec128 r4; - Lib_IntVector_Intrinsics_vec128 r51; - Lib_IntVector_Intrinsics_vec128 r52; - Lib_IntVector_Intrinsics_vec128 r53; - Lib_IntVector_Intrinsics_vec128 r54; - Lib_IntVector_Intrinsics_vec128 f10; - Lib_IntVector_Intrinsics_vec128 f11; - Lib_IntVector_Intrinsics_vec128 f12; - Lib_IntVector_Intrinsics_vec128 f13; - Lib_IntVector_Intrinsics_vec128 f14; - Lib_IntVector_Intrinsics_vec128 a0; - Lib_IntVector_Intrinsics_vec128 a1; - Lib_IntVector_Intrinsics_vec128 a2; - Lib_IntVector_Intrinsics_vec128 a3; - Lib_IntVector_Intrinsics_vec128 a4; - Lib_IntVector_Intrinsics_vec128 a01; - Lib_IntVector_Intrinsics_vec128 a11; - Lib_IntVector_Intrinsics_vec128 a21; - Lib_IntVector_Intrinsics_vec128 a31; - Lib_IntVector_Intrinsics_vec128 a41; - Lib_IntVector_Intrinsics_vec128 a02; - Lib_IntVector_Intrinsics_vec128 a12; - Lib_IntVector_Intrinsics_vec128 a22; - Lib_IntVector_Intrinsics_vec128 a32; - Lib_IntVector_Intrinsics_vec128 a42; - Lib_IntVector_Intrinsics_vec128 a03; - Lib_IntVector_Intrinsics_vec128 a13; - Lib_IntVector_Intrinsics_vec128 a23; - Lib_IntVector_Intrinsics_vec128 a33; - Lib_IntVector_Intrinsics_vec128 a43; - Lib_IntVector_Intrinsics_vec128 a04; - Lib_IntVector_Intrinsics_vec128 a14; - Lib_IntVector_Intrinsics_vec128 a24; - Lib_IntVector_Intrinsics_vec128 a34; - Lib_IntVector_Intrinsics_vec128 a44; - Lib_IntVector_Intrinsics_vec128 a05; - Lib_IntVector_Intrinsics_vec128 a15; - Lib_IntVector_Intrinsics_vec128 a25; - Lib_IntVector_Intrinsics_vec128 a35; - Lib_IntVector_Intrinsics_vec128 a45; - Lib_IntVector_Intrinsics_vec128 a06; - Lib_IntVector_Intrinsics_vec128 a16; - Lib_IntVector_Intrinsics_vec128 a26; - Lib_IntVector_Intrinsics_vec128 a36; - Lib_IntVector_Intrinsics_vec128 a46; - Lib_IntVector_Intrinsics_vec128 t0; - Lib_IntVector_Intrinsics_vec128 t1; - Lib_IntVector_Intrinsics_vec128 t2; - Lib_IntVector_Intrinsics_vec128 t3; - Lib_IntVector_Intrinsics_vec128 t4; - Lib_IntVector_Intrinsics_vec128 mask26; - Lib_IntVector_Intrinsics_vec128 z0; - Lib_IntVector_Intrinsics_vec128 z1; - Lib_IntVector_Intrinsics_vec128 x0; - Lib_IntVector_Intrinsics_vec128 x3; - Lib_IntVector_Intrinsics_vec128 x1; - Lib_IntVector_Intrinsics_vec128 x4; - Lib_IntVector_Intrinsics_vec128 z01; - Lib_IntVector_Intrinsics_vec128 z11; - Lib_IntVector_Intrinsics_vec128 t; - Lib_IntVector_Intrinsics_vec128 z12; - Lib_IntVector_Intrinsics_vec128 x11; - Lib_IntVector_Intrinsics_vec128 x41; - Lib_IntVector_Intrinsics_vec128 x2; - Lib_IntVector_Intrinsics_vec128 x01; - Lib_IntVector_Intrinsics_vec128 z02; - Lib_IntVector_Intrinsics_vec128 z13; - Lib_IntVector_Intrinsics_vec128 x21; - Lib_IntVector_Intrinsics_vec128 x02; - Lib_IntVector_Intrinsics_vec128 x31; - Lib_IntVector_Intrinsics_vec128 x12; - Lib_IntVector_Intrinsics_vec128 z03; - Lib_IntVector_Intrinsics_vec128 x32; - Lib_IntVector_Intrinsics_vec128 x42; - Lib_IntVector_Intrinsics_vec128 o0; - 
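/*
 * The long declaration prologue above is the visible cost of C89-compatible
 * code generation: C89 requires all declarations in a block to precede its
 * statements, so the emitted code either opens nested blocks (the deep
 * { ... } nesting elsewhere in this file) or hoists every temporary of the
 * unrolled multiply to the top of the block and assigns it later.  An
 * illustrative contrast under that constraint (hypothetical code, not from
 * this distribution):
 */
#include <stdint.h>
#include <string.h>

static uint64_t clamp_lo26_c89(const uint8_t *b)
{
  uint64_t lo;                     /* C89: declarations precede statements */
  uint64_t x;
  memcpy(&lo, b, 8);               /* first statement, so x could not be   */
  x = lo & 0x3ffffffU;             /* initialized from lo at declaration   */
  return x;
}

static uint64_t clamp_lo26_c99(const uint8_t *b)
{
  uint64_t lo;
  memcpy(&lo, b, 8);
  uint64_t x = lo & 0x3ffffffU;    /* C99 and later: declare at first use  */
  return x;
}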
Lib_IntVector_Intrinsics_vec128 o1; - Lib_IntVector_Intrinsics_vec128 o2; - Lib_IntVector_Intrinsics_vec128 o3; - Lib_IntVector_Intrinsics_vec128 o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec128_load64(b); - f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask); - r1 = pre; - r5 = pre + (uint32_t)5U; - r0 = r1[0U]; - r11 = r1[1U]; - r2 = r1[2U]; - r3 = r1[3U]; - r4 = r1[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13); - a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec128_mul64(r11, a01); - a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a11)); - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a21)); - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a31)); - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r11, a31)); - a06 = - Lib_IntVector_Intrinsics_vec128_add64(a05, - Lib_IntVector_Intrinsics_vec128_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec128_add64(a15, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec128_add64(a25, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a41)); - a36 = - Lib_IntVector_Intrinsics_vec128_add64(a35, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec128_add64(a45, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a41)); - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 
(uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - return; - } - } -} - -static inline void -poly1305_do_128( - uint8_t *k, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *out -) -{ - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U }; - uint8_t block[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec128 *pre; - Lib_IntVector_Intrinsics_vec128 *acc; - Hacl_Poly1305_128_poly1305_init(ctx, k); - if (aadlen != (uint32_t)0U) - { - poly1305_padded_128(ctx, aadlen, aad); - } - if (mlen != (uint32_t)0U) - { - poly1305_padded_128(ctx, mlen, m); - } - store64_le(block, (uint64_t)aadlen); - store64_le(block + (uint32_t)8U, (uint64_t)mlen); - pre = ctx + (uint32_t)5U; - acc = ctx; - { - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo); - Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi); - Lib_IntVector_Intrinsics_vec128 - f010 = - Lib_IntVector_Intrinsics_vec128_and(f0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f110 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = 
Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f01 = f010; - Lib_IntVector_Intrinsics_vec128 f111 = f110; - Lib_IntVector_Intrinsics_vec128 f2 = f20; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f41 = f40; - uint64_t b; - Lib_IntVector_Intrinsics_vec128 mask; - Lib_IntVector_Intrinsics_vec128 f4; - Lib_IntVector_Intrinsics_vec128 *r; - Lib_IntVector_Intrinsics_vec128 *r5; - Lib_IntVector_Intrinsics_vec128 r0; - Lib_IntVector_Intrinsics_vec128 r1; - Lib_IntVector_Intrinsics_vec128 r2; - Lib_IntVector_Intrinsics_vec128 r3; - Lib_IntVector_Intrinsics_vec128 r4; - Lib_IntVector_Intrinsics_vec128 r51; - Lib_IntVector_Intrinsics_vec128 r52; - Lib_IntVector_Intrinsics_vec128 r53; - Lib_IntVector_Intrinsics_vec128 r54; - Lib_IntVector_Intrinsics_vec128 f10; - Lib_IntVector_Intrinsics_vec128 f11; - Lib_IntVector_Intrinsics_vec128 f12; - Lib_IntVector_Intrinsics_vec128 f13; - Lib_IntVector_Intrinsics_vec128 f14; - Lib_IntVector_Intrinsics_vec128 a0; - Lib_IntVector_Intrinsics_vec128 a1; - Lib_IntVector_Intrinsics_vec128 a2; - Lib_IntVector_Intrinsics_vec128 a3; - Lib_IntVector_Intrinsics_vec128 a4; - Lib_IntVector_Intrinsics_vec128 a01; - Lib_IntVector_Intrinsics_vec128 a11; - Lib_IntVector_Intrinsics_vec128 a21; - Lib_IntVector_Intrinsics_vec128 a31; - Lib_IntVector_Intrinsics_vec128 a41; - Lib_IntVector_Intrinsics_vec128 a02; - Lib_IntVector_Intrinsics_vec128 a12; - Lib_IntVector_Intrinsics_vec128 a22; - Lib_IntVector_Intrinsics_vec128 a32; - Lib_IntVector_Intrinsics_vec128 a42; - Lib_IntVector_Intrinsics_vec128 a03; - Lib_IntVector_Intrinsics_vec128 a13; - Lib_IntVector_Intrinsics_vec128 a23; - Lib_IntVector_Intrinsics_vec128 a33; - Lib_IntVector_Intrinsics_vec128 a43; - Lib_IntVector_Intrinsics_vec128 a04; - Lib_IntVector_Intrinsics_vec128 a14; - Lib_IntVector_Intrinsics_vec128 a24; - Lib_IntVector_Intrinsics_vec128 a34; - Lib_IntVector_Intrinsics_vec128 a44; - Lib_IntVector_Intrinsics_vec128 a05; - Lib_IntVector_Intrinsics_vec128 a15; - Lib_IntVector_Intrinsics_vec128 a25; - Lib_IntVector_Intrinsics_vec128 a35; - Lib_IntVector_Intrinsics_vec128 a45; - Lib_IntVector_Intrinsics_vec128 a06; - Lib_IntVector_Intrinsics_vec128 a16; - Lib_IntVector_Intrinsics_vec128 a26; - Lib_IntVector_Intrinsics_vec128 a36; - Lib_IntVector_Intrinsics_vec128 a46; - Lib_IntVector_Intrinsics_vec128 t0; - Lib_IntVector_Intrinsics_vec128 t1; - Lib_IntVector_Intrinsics_vec128 t2; - Lib_IntVector_Intrinsics_vec128 t3; - Lib_IntVector_Intrinsics_vec128 t4; - Lib_IntVector_Intrinsics_vec128 mask26; - Lib_IntVector_Intrinsics_vec128 z0; - Lib_IntVector_Intrinsics_vec128 z1; - Lib_IntVector_Intrinsics_vec128 x0; - Lib_IntVector_Intrinsics_vec128 x3; - Lib_IntVector_Intrinsics_vec128 x1; - Lib_IntVector_Intrinsics_vec128 x4; - Lib_IntVector_Intrinsics_vec128 z01; - Lib_IntVector_Intrinsics_vec128 z11; - Lib_IntVector_Intrinsics_vec128 t; - Lib_IntVector_Intrinsics_vec128 z12; - Lib_IntVector_Intrinsics_vec128 x11; - Lib_IntVector_Intrinsics_vec128 x41; - Lib_IntVector_Intrinsics_vec128 x2; - Lib_IntVector_Intrinsics_vec128 x01; - Lib_IntVector_Intrinsics_vec128 z02; - Lib_IntVector_Intrinsics_vec128 z13; - Lib_IntVector_Intrinsics_vec128 x21; - Lib_IntVector_Intrinsics_vec128 x02; - Lib_IntVector_Intrinsics_vec128 x31; - Lib_IntVector_Intrinsics_vec128 x12; - Lib_IntVector_Intrinsics_vec128 z03; - Lib_IntVector_Intrinsics_vec128 x32; - Lib_IntVector_Intrinsics_vec128 x42; - Lib_IntVector_Intrinsics_vec128 o0; - Lib_IntVector_Intrinsics_vec128 
o1; - Lib_IntVector_Intrinsics_vec128 o2; - Lib_IntVector_Intrinsics_vec128 o3; - Lib_IntVector_Intrinsics_vec128 o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec128_load64(b); - f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask); - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13); - a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec128_mul64(r1, a01); - a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a11)); - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a21)); - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a31)); - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a31)); - a06 = - Lib_IntVector_Intrinsics_vec128_add64(a05, - Lib_IntVector_Intrinsics_vec128_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec128_add64(a15, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec128_add64(a25, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a41)); - a36 = - Lib_IntVector_Intrinsics_vec128_add64(a35, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec128_add64(a45, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a41)); - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U); - z1 = 
Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - Hacl_Poly1305_128_poly1305_finish(out, k, ctx); - } -} - -void -Hacl_Chacha20Poly1305_128_aead_encrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -) -{ - Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, (uint32_t)1U); - { - uint8_t tmp[64U] = { 0U }; - uint8_t *key; - Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U); - key = tmp; - poly1305_do_128(key, aadlen, aad, mlen, cipher, mac); - } -} - -uint32_t -Hacl_Chacha20Poly1305_128_aead_decrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -) -{ - uint8_t computed_mac[16U] = { 0U }; - uint8_t tmp[64U] = { 0U }; - uint8_t *key; - Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U); - key = tmp; - poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac); - { - uint8_t res0 = (uint8_t)255U; - uint8_t z; - uint32_t res; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]); - res0 = uu____0 & res0;); - z = res0; - if (z == (uint8_t)255U) - { - Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, (uint32_t)1U); - res = (uint32_t)0U; - } - else - { - res = (uint32_t)1U; - } - return res; - } -} - diff --git a/dist/c89-compatible/Hacl_Chacha20Poly1305_128.h b/dist/c89-compatible/Hacl_Chacha20Poly1305_128.h deleted file mode 100644 index 17b14ecb8c..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20Poly1305_128.h +++ /dev/null @@ -1,71 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Chacha20Poly1305_128_H -#define __Hacl_Chacha20Poly1305_128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Poly1305_128.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Chacha20_Vec128.h" -#include "evercrypt_targetconfig.h" -void -Hacl_Chacha20Poly1305_128_aead_encrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -); - -uint32_t -Hacl_Chacha20Poly1305_128_aead_decrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Chacha20Poly1305_128_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Chacha20Poly1305_256.c b/dist/c89-compatible/Hacl_Chacha20Poly1305_256.c deleted file mode 100644 index 62f7c8379f..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20Poly1305_256.c +++ /dev/null @@ -1,1356 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "Hacl_Chacha20Poly1305_256.h" - -#include "internal/Hacl_Poly1305_256.h" -#include "libintvector.h" -static inline void -poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text) -{ - uint32_t n = len / (uint32_t)16U; - uint32_t r = len % (uint32_t)16U; - uint8_t *blocks = text; - uint8_t *rem = text + n * (uint32_t)16U; - Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 *acc0 = ctx; - uint32_t sz_block = (uint32_t)64U; - uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block; - uint8_t *t00 = blocks; - uint32_t len1; - uint8_t *t10; - uint32_t nb0; - uint32_t rem1; - if (len0 > (uint32_t)0U) - { - uint32_t bs = (uint32_t)64U; - uint8_t *text0 = t00; - Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc0, text0); - { - uint32_t len10 = len0 - bs; - uint8_t *text1 = t00 + bs; - uint32_t nb = len10 / bs; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *block = text1 + i * bs; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block); - Lib_IntVector_Intrinsics_vec256 - hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 - mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi); - Lib_IntVector_Intrinsics_vec256 - m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi); - Lib_IntVector_Intrinsics_vec256 - m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U); - Lib_IntVector_Intrinsics_vec256 - m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U); - Lib_IntVector_Intrinsics_vec256 - m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1); - Lib_IntVector_Intrinsics_vec256 - t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1); - Lib_IntVector_Intrinsics_vec256 - t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3); - Lib_IntVector_Intrinsics_vec256 - t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U); - Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260); - Lib_IntVector_Intrinsics_vec256 - t11 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t11, mask260); - Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260); - Lib_IntVector_Intrinsics_vec256 - t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U); - Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260); - Lib_IntVector_Intrinsics_vec256 - o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 o00 = o5; - Lib_IntVector_Intrinsics_vec256 o11 = o10; - Lib_IntVector_Intrinsics_vec256 o21 = o20; - Lib_IntVector_Intrinsics_vec256 o31 = o30; - Lib_IntVector_Intrinsics_vec256 o41 = o40; - e[0U] = o00; - e[1U] = o11; - e[2U] = o21; - e[3U] = o31; - e[4U] = o41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b); - Lib_IntVector_Intrinsics_vec256 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec256 *rn = pre0 + (uint32_t)10U; - Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + 
(uint32_t)15U; - Lib_IntVector_Intrinsics_vec256 r0 = rn[0U]; - Lib_IntVector_Intrinsics_vec256 r1 = rn[1U]; - Lib_IntVector_Intrinsics_vec256 r2 = rn[2U]; - Lib_IntVector_Intrinsics_vec256 r3 = rn[3U]; - Lib_IntVector_Intrinsics_vec256 r4 = rn[4U]; - Lib_IntVector_Intrinsics_vec256 r51 = rn5[1U]; - Lib_IntVector_Intrinsics_vec256 r52 = rn5[2U]; - Lib_IntVector_Intrinsics_vec256 r53 = rn5[3U]; - Lib_IntVector_Intrinsics_vec256 r54 = rn5[4U]; - Lib_IntVector_Intrinsics_vec256 f10 = acc0[0U]; - Lib_IntVector_Intrinsics_vec256 f110 = acc0[1U]; - Lib_IntVector_Intrinsics_vec256 f120 = acc0[2U]; - Lib_IntVector_Intrinsics_vec256 f130 = acc0[3U]; - Lib_IntVector_Intrinsics_vec256 f140 = acc0[4U]; - Lib_IntVector_Intrinsics_vec256 a0 = Lib_IntVector_Intrinsics_vec256_mul64(r0, f10); - Lib_IntVector_Intrinsics_vec256 a1 = Lib_IntVector_Intrinsics_vec256_mul64(r1, f10); - Lib_IntVector_Intrinsics_vec256 a2 = Lib_IntVector_Intrinsics_vec256_mul64(r2, f10); - Lib_IntVector_Intrinsics_vec256 a3 = Lib_IntVector_Intrinsics_vec256_mul64(r3, f10); - Lib_IntVector_Intrinsics_vec256 a4 = Lib_IntVector_Intrinsics_vec256_mul64(r4, f10); - Lib_IntVector_Intrinsics_vec256 - a01 = - Lib_IntVector_Intrinsics_vec256_add64(a0, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f110)); - Lib_IntVector_Intrinsics_vec256 - a11 = - Lib_IntVector_Intrinsics_vec256_add64(a1, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f110)); - Lib_IntVector_Intrinsics_vec256 - a21 = - Lib_IntVector_Intrinsics_vec256_add64(a2, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f110)); - Lib_IntVector_Intrinsics_vec256 - a31 = - Lib_IntVector_Intrinsics_vec256_add64(a3, - Lib_IntVector_Intrinsics_vec256_mul64(r2, f110)); - Lib_IntVector_Intrinsics_vec256 - a41 = - Lib_IntVector_Intrinsics_vec256_add64(a4, - Lib_IntVector_Intrinsics_vec256_mul64(r3, f110)); - Lib_IntVector_Intrinsics_vec256 - a02 = - Lib_IntVector_Intrinsics_vec256_add64(a01, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f120)); - Lib_IntVector_Intrinsics_vec256 - a12 = - Lib_IntVector_Intrinsics_vec256_add64(a11, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f120)); - Lib_IntVector_Intrinsics_vec256 - a22 = - Lib_IntVector_Intrinsics_vec256_add64(a21, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f120)); - Lib_IntVector_Intrinsics_vec256 - a32 = - Lib_IntVector_Intrinsics_vec256_add64(a31, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f120)); - Lib_IntVector_Intrinsics_vec256 - a42 = - Lib_IntVector_Intrinsics_vec256_add64(a41, - Lib_IntVector_Intrinsics_vec256_mul64(r2, f120)); - Lib_IntVector_Intrinsics_vec256 - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r52, f130)); - Lib_IntVector_Intrinsics_vec256 - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f130)); - Lib_IntVector_Intrinsics_vec256 - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f130)); - Lib_IntVector_Intrinsics_vec256 - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f130)); - Lib_IntVector_Intrinsics_vec256 - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f130)); - Lib_IntVector_Intrinsics_vec256 - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r51, f140)); - Lib_IntVector_Intrinsics_vec256 - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r52, f140)); - Lib_IntVector_Intrinsics_vec256 - a24 = - 
Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f140)); - Lib_IntVector_Intrinsics_vec256 - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f140)); - Lib_IntVector_Intrinsics_vec256 - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f140)); - Lib_IntVector_Intrinsics_vec256 t01 = a04; - Lib_IntVector_Intrinsics_vec256 t1 = a14; - Lib_IntVector_Intrinsics_vec256 t2 = a24; - Lib_IntVector_Intrinsics_vec256 t3 = a34; - Lib_IntVector_Intrinsics_vec256 t4 = a44; - Lib_IntVector_Intrinsics_vec256 - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26); - Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0); - Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - Lib_IntVector_Intrinsics_vec256 - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - Lib_IntVector_Intrinsics_vec256 - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - Lib_IntVector_Intrinsics_vec256 - x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - Lib_IntVector_Intrinsics_vec256 - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - Lib_IntVector_Intrinsics_vec256 o01 = x02; - Lib_IntVector_Intrinsics_vec256 o12 = x12; - Lib_IntVector_Intrinsics_vec256 o22 = x21; - Lib_IntVector_Intrinsics_vec256 o32 = x32; - Lib_IntVector_Intrinsics_vec256 o42 = x42; - acc0[0U] = o01; - acc0[1U] = o12; - acc0[2U] = o22; - acc0[3U] = o32; - acc0[4U] = o42; - { - Lib_IntVector_Intrinsics_vec256 f100 = acc0[0U]; - Lib_IntVector_Intrinsics_vec256 f11 = acc0[1U]; - Lib_IntVector_Intrinsics_vec256 f12 = acc0[2U]; - Lib_IntVector_Intrinsics_vec256 f13 = acc0[3U]; - Lib_IntVector_Intrinsics_vec256 f14 = acc0[4U]; - Lib_IntVector_Intrinsics_vec256 f20 = e[0U]; - Lib_IntVector_Intrinsics_vec256 f21 = 
e[1U]; - Lib_IntVector_Intrinsics_vec256 f22 = e[2U]; - Lib_IntVector_Intrinsics_vec256 f23 = e[3U]; - Lib_IntVector_Intrinsics_vec256 f24 = e[4U]; - Lib_IntVector_Intrinsics_vec256 - o0 = Lib_IntVector_Intrinsics_vec256_add64(f100, f20); - Lib_IntVector_Intrinsics_vec256 - o1 = Lib_IntVector_Intrinsics_vec256_add64(f11, f21); - Lib_IntVector_Intrinsics_vec256 - o2 = Lib_IntVector_Intrinsics_vec256_add64(f12, f22); - Lib_IntVector_Intrinsics_vec256 - o3 = Lib_IntVector_Intrinsics_vec256_add64(f13, f23); - Lib_IntVector_Intrinsics_vec256 - o4 = Lib_IntVector_Intrinsics_vec256_add64(f14, f24); - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - } - Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc0, pre0); - } - } - len1 = n * (uint32_t)16U - len0; - t10 = blocks + len0; - nb0 = len1 / (uint32_t)16U; - rem1 = len1 % (uint32_t)16U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb0; i++) - { - uint8_t *block = t10 + i * (uint32_t)16U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo); - Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi); - Lib_IntVector_Intrinsics_vec256 - f010 = - Lib_IntVector_Intrinsics_vec256_and(f0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f110 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f20 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec256 - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 f01 = f010; - Lib_IntVector_Intrinsics_vec256 f111 = f110; - Lib_IntVector_Intrinsics_vec256 f2 = f20; - Lib_IntVector_Intrinsics_vec256 f3 = f30; - Lib_IntVector_Intrinsics_vec256 f41 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b); - Lib_IntVector_Intrinsics_vec256 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec256 *r1 = pre0; - Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 r0 = r1[0U]; - Lib_IntVector_Intrinsics_vec256 r11 = r1[1U]; - Lib_IntVector_Intrinsics_vec256 r2 = r1[2U]; - Lib_IntVector_Intrinsics_vec256 r3 = r1[3U]; - Lib_IntVector_Intrinsics_vec256 r4 = r1[4U]; - Lib_IntVector_Intrinsics_vec256 r51 = r5[1U]; - Lib_IntVector_Intrinsics_vec256 r52 = r5[2U]; - Lib_IntVector_Intrinsics_vec256 r53 = r5[3U]; - Lib_IntVector_Intrinsics_vec256 r54 = r5[4U]; - Lib_IntVector_Intrinsics_vec256 f10 = e[0U]; - Lib_IntVector_Intrinsics_vec256 f11 = e[1U]; - 
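/*
 * Both the vec128 and vec256 paths convert each 16-byte block into five
 * 26-bit limbs with the same masks and shifts seen above (26, then 52 with
 * a 12-bit splice from the high word, then 14, then 40), and OR in
 * 0x1000000: bit 24 of limb 4 is 2^128, the RFC 8439 pad bit for a full
 * block.  A scalar sketch, names hypothetical; le64_ stands in for the
 * little-endian load64_le of the HACL* sources:
 */
#include <stdint.h>
#include <string.h>

static uint64_t le64_(const uint8_t *b)
{
  uint64_t x;
  memcpy(&x, b, 8);                /* assumes a little-endian host, for brevity */
  return x;
}

static void poly1305_load_block_sketch(uint64_t e[5], const uint8_t block[16])
{
  uint64_t lo = le64_(block);
  uint64_t hi = le64_(block + 8);
  e[0] = lo & 0x3ffffffU;                        /* bits   0..25           */
  e[1] = (lo >> 26U) & 0x3ffffffU;               /* bits  26..51           */
  e[2] = (lo >> 52U) | ((hi & 0x3fffU) << 12U);  /* bits  52..77           */
  e[3] = (hi >> 14U) & 0x3ffffffU;               /* bits  78..103          */
  e[4] = (hi >> 40U) | 0x1000000U;               /* bits 104..127 + 2^128  */
}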
Lib_IntVector_Intrinsics_vec256 f12 = e[2U]; - Lib_IntVector_Intrinsics_vec256 f13 = e[3U]; - Lib_IntVector_Intrinsics_vec256 f14 = e[4U]; - Lib_IntVector_Intrinsics_vec256 a0 = acc0[0U]; - Lib_IntVector_Intrinsics_vec256 a1 = acc0[1U]; - Lib_IntVector_Intrinsics_vec256 a2 = acc0[2U]; - Lib_IntVector_Intrinsics_vec256 a3 = acc0[3U]; - Lib_IntVector_Intrinsics_vec256 a4 = acc0[4U]; - Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10); - Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11); - Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12); - Lib_IntVector_Intrinsics_vec256 a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13); - Lib_IntVector_Intrinsics_vec256 a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14); - Lib_IntVector_Intrinsics_vec256 a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01); - Lib_IntVector_Intrinsics_vec256 a12 = Lib_IntVector_Intrinsics_vec256_mul64(r11, a01); - Lib_IntVector_Intrinsics_vec256 a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01); - Lib_IntVector_Intrinsics_vec256 a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01); - Lib_IntVector_Intrinsics_vec256 a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01); - Lib_IntVector_Intrinsics_vec256 - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a11)); - Lib_IntVector_Intrinsics_vec256 - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a11)); - Lib_IntVector_Intrinsics_vec256 - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a11)); - Lib_IntVector_Intrinsics_vec256 - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a11)); - Lib_IntVector_Intrinsics_vec256 - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r3, a11)); - Lib_IntVector_Intrinsics_vec256 - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a21)); - Lib_IntVector_Intrinsics_vec256 - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a21)); - Lib_IntVector_Intrinsics_vec256 - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a21)); - Lib_IntVector_Intrinsics_vec256 - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a21)); - Lib_IntVector_Intrinsics_vec256 - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a21)); - Lib_IntVector_Intrinsics_vec256 - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a31)); - Lib_IntVector_Intrinsics_vec256 - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a31)); - Lib_IntVector_Intrinsics_vec256 - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a31)); - Lib_IntVector_Intrinsics_vec256 - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a31)); - Lib_IntVector_Intrinsics_vec256 - a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a31)); - Lib_IntVector_Intrinsics_vec256 - a06 = - Lib_IntVector_Intrinsics_vec256_add64(a05, - Lib_IntVector_Intrinsics_vec256_mul64(r51, a41)); - Lib_IntVector_Intrinsics_vec256 - a16 = - 
Lib_IntVector_Intrinsics_vec256_add64(a15, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a41)); - Lib_IntVector_Intrinsics_vec256 - a26 = - Lib_IntVector_Intrinsics_vec256_add64(a25, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a41)); - Lib_IntVector_Intrinsics_vec256 - a36 = - Lib_IntVector_Intrinsics_vec256_add64(a35, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a41)); - Lib_IntVector_Intrinsics_vec256 - a46 = - Lib_IntVector_Intrinsics_vec256_add64(a45, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a41)); - Lib_IntVector_Intrinsics_vec256 t01 = a06; - Lib_IntVector_Intrinsics_vec256 t11 = a16; - Lib_IntVector_Intrinsics_vec256 t2 = a26; - Lib_IntVector_Intrinsics_vec256 t3 = a36; - Lib_IntVector_Intrinsics_vec256 t4 = a46; - Lib_IntVector_Intrinsics_vec256 - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26); - Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0); - Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - Lib_IntVector_Intrinsics_vec256 - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - Lib_IntVector_Intrinsics_vec256 - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - Lib_IntVector_Intrinsics_vec256 - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - Lib_IntVector_Intrinsics_vec256 o0 = x02; - Lib_IntVector_Intrinsics_vec256 o1 = x12; - Lib_IntVector_Intrinsics_vec256 o2 = x21; - Lib_IntVector_Intrinsics_vec256 o3 = x32; - Lib_IntVector_Intrinsics_vec256 o4 = x42; - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *last = t10 + nb0 * (uint32_t)16U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t tmp[16U] = { 0U }; - 
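The `z0`/`z1` through `x42` sequence above is the carry-propagation pass that brings the unreduced limbs back under 26 bits; the step `t = z11 << 2; z12 = z11 + t` multiplies the carry leaving limb 4 by 5 before folding it into limb 0, because 2^130 mod (2^130 - 5) = 5. A scalar sketch of the same chain (hypothetical helper, shown only to make the dataflow legible):

```c
#include <stdint.h>

/* Carry pass over five 26-bit limbs in the field 2^130 - 5. Mirrors the
   interleaved order above: (0->1, 3->4), (1->2, 4->0), (0->1, 2->3),
   (3->4); the carry out of limb 4 re-enters limb 0 scaled by 5. */
static void poly1305_carry_sketch(uint64_t t[5])
{
  const uint64_t mask26 = 0x3ffffffU;
  uint64_t c;
  c = t[0] >> 26U; t[0] &= mask26; t[1] += c;
  c = t[3] >> 26U; t[3] &= mask26; t[4] += c;
  c = t[1] >> 26U; t[1] &= mask26; t[2] += c;
  c = t[4] >> 26U; t[4] &= mask26; t[0] += c * 5U; /* 2^130 mod p = 5 */
  c = t[0] >> 26U; t[0] &= mask26; t[1] += c;
  c = t[2] >> 26U; t[2] &= mask26; t[3] += c;
  c = t[3] >> 26U; t[3] &= mask26; t[4] += c;
}
```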
memcpy(tmp, last, rem1 * sizeof (uint8_t)); - { - uint64_t u0 = load64_le(tmp); - uint64_t lo = u0; - uint64_t u = load64_le(tmp + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo); - Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi); - Lib_IntVector_Intrinsics_vec256 - f010 = - Lib_IntVector_Intrinsics_vec256_and(f0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f110 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f20 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec256 - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 f01 = f010; - Lib_IntVector_Intrinsics_vec256 f111 = f110; - Lib_IntVector_Intrinsics_vec256 f2 = f20; - Lib_IntVector_Intrinsics_vec256 f3 = f30; - Lib_IntVector_Intrinsics_vec256 f4 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f4; - { - uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U; - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b); - Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U]; - e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask); - { - Lib_IntVector_Intrinsics_vec256 *r1 = pre0; - Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 r0 = r1[0U]; - Lib_IntVector_Intrinsics_vec256 r11 = r1[1U]; - Lib_IntVector_Intrinsics_vec256 r2 = r1[2U]; - Lib_IntVector_Intrinsics_vec256 r3 = r1[3U]; - Lib_IntVector_Intrinsics_vec256 r4 = r1[4U]; - Lib_IntVector_Intrinsics_vec256 r51 = r5[1U]; - Lib_IntVector_Intrinsics_vec256 r52 = r5[2U]; - Lib_IntVector_Intrinsics_vec256 r53 = r5[3U]; - Lib_IntVector_Intrinsics_vec256 r54 = r5[4U]; - Lib_IntVector_Intrinsics_vec256 f10 = e[0U]; - Lib_IntVector_Intrinsics_vec256 f11 = e[1U]; - Lib_IntVector_Intrinsics_vec256 f12 = e[2U]; - Lib_IntVector_Intrinsics_vec256 f13 = e[3U]; - Lib_IntVector_Intrinsics_vec256 f14 = e[4U]; - Lib_IntVector_Intrinsics_vec256 a0 = acc0[0U]; - Lib_IntVector_Intrinsics_vec256 a1 = acc0[1U]; - Lib_IntVector_Intrinsics_vec256 a2 = acc0[2U]; - Lib_IntVector_Intrinsics_vec256 a3 = acc0[3U]; - Lib_IntVector_Intrinsics_vec256 a4 = acc0[4U]; - Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10); - Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11); - Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12); - Lib_IntVector_Intrinsics_vec256 a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13); - Lib_IntVector_Intrinsics_vec256 a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14); - Lib_IntVector_Intrinsics_vec256 a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01); - Lib_IntVector_Intrinsics_vec256 a12 = 
Lib_IntVector_Intrinsics_vec256_mul64(r11, a01); - Lib_IntVector_Intrinsics_vec256 a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01); - Lib_IntVector_Intrinsics_vec256 a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01); - Lib_IntVector_Intrinsics_vec256 a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01); - Lib_IntVector_Intrinsics_vec256 - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a11)); - Lib_IntVector_Intrinsics_vec256 - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a11)); - Lib_IntVector_Intrinsics_vec256 - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a11)); - Lib_IntVector_Intrinsics_vec256 - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a11)); - Lib_IntVector_Intrinsics_vec256 - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r3, a11)); - Lib_IntVector_Intrinsics_vec256 - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a21)); - Lib_IntVector_Intrinsics_vec256 - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a21)); - Lib_IntVector_Intrinsics_vec256 - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a21)); - Lib_IntVector_Intrinsics_vec256 - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a21)); - Lib_IntVector_Intrinsics_vec256 - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a21)); - Lib_IntVector_Intrinsics_vec256 - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a31)); - Lib_IntVector_Intrinsics_vec256 - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a31)); - Lib_IntVector_Intrinsics_vec256 - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a31)); - Lib_IntVector_Intrinsics_vec256 - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a31)); - Lib_IntVector_Intrinsics_vec256 - a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a31)); - Lib_IntVector_Intrinsics_vec256 - a06 = - Lib_IntVector_Intrinsics_vec256_add64(a05, - Lib_IntVector_Intrinsics_vec256_mul64(r51, a41)); - Lib_IntVector_Intrinsics_vec256 - a16 = - Lib_IntVector_Intrinsics_vec256_add64(a15, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a41)); - Lib_IntVector_Intrinsics_vec256 - a26 = - Lib_IntVector_Intrinsics_vec256_add64(a25, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a41)); - Lib_IntVector_Intrinsics_vec256 - a36 = - Lib_IntVector_Intrinsics_vec256_add64(a35, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a41)); - Lib_IntVector_Intrinsics_vec256 - a46 = - Lib_IntVector_Intrinsics_vec256_add64(a45, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a41)); - Lib_IntVector_Intrinsics_vec256 t01 = a06; - Lib_IntVector_Intrinsics_vec256 t11 = a16; - Lib_IntVector_Intrinsics_vec256 t2 = a26; - Lib_IntVector_Intrinsics_vec256 t3 = a36; - Lib_IntVector_Intrinsics_vec256 t4 = a46; - Lib_IntVector_Intrinsics_vec256 - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U); - 
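For a short final block of `rem1` bytes, the code above zero-pads to 16 bytes and then sets Poly1305's high "1" bit at bit position `rem1 * 8`: the mask `1 << (rem1*8 % 26)` is OR-ed into limb `rem1*8 / 26`. For a full 16-byte block this degenerates to the `0x1000000` constant OR-ed into limb 4 (bit 128 = limb 4, bit 24), which is exactly the full-block path seen earlier. A hedged scalar sketch:

```c
#include <stdint.h>

/* Append the Poly1305 padding bit for a block of `rem` bytes (rem <= 16).
   For rem == 16 this sets bit 24 of limb 4, i.e. the 0x1000000 constant
   used on the full-block path. */
static void poly1305_set_pad_bit(uint64_t e[5], uint32_t rem)
{
  uint32_t bit = rem * 8U;                      /* block length in bits */
  e[bit / 26U] |= (uint64_t)1U << (bit % 26U);
}
```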
Lib_IntVector_Intrinsics_vec256 - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26); - Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0); - Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - Lib_IntVector_Intrinsics_vec256 - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - Lib_IntVector_Intrinsics_vec256 - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - Lib_IntVector_Intrinsics_vec256 - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - Lib_IntVector_Intrinsics_vec256 o0 = x02; - Lib_IntVector_Intrinsics_vec256 o1 = x12; - Lib_IntVector_Intrinsics_vec256 o2 = x21; - Lib_IntVector_Intrinsics_vec256 o3 = x32; - Lib_IntVector_Intrinsics_vec256 o4 = x42; - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - { - uint8_t tmp[16U] = { 0U }; - memcpy(tmp, rem, r * sizeof (uint8_t)); - if (r > (uint32_t)0U) - { - Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 *acc = ctx; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - uint64_t u0 = load64_le(tmp); - uint64_t lo = u0; - uint64_t u = load64_le(tmp + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo); - Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi); - Lib_IntVector_Intrinsics_vec256 - f010 = - Lib_IntVector_Intrinsics_vec256_and(f0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f110 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f20 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)52U), - 
Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec256 - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 f01 = f010; - Lib_IntVector_Intrinsics_vec256 f111 = f110; - Lib_IntVector_Intrinsics_vec256 f2 = f20; - Lib_IntVector_Intrinsics_vec256 f3 = f30; - Lib_IntVector_Intrinsics_vec256 f41 = f40; - uint64_t b; - Lib_IntVector_Intrinsics_vec256 mask; - Lib_IntVector_Intrinsics_vec256 f4; - Lib_IntVector_Intrinsics_vec256 *r1; - Lib_IntVector_Intrinsics_vec256 *r5; - Lib_IntVector_Intrinsics_vec256 r0; - Lib_IntVector_Intrinsics_vec256 r11; - Lib_IntVector_Intrinsics_vec256 r2; - Lib_IntVector_Intrinsics_vec256 r3; - Lib_IntVector_Intrinsics_vec256 r4; - Lib_IntVector_Intrinsics_vec256 r51; - Lib_IntVector_Intrinsics_vec256 r52; - Lib_IntVector_Intrinsics_vec256 r53; - Lib_IntVector_Intrinsics_vec256 r54; - Lib_IntVector_Intrinsics_vec256 f10; - Lib_IntVector_Intrinsics_vec256 f11; - Lib_IntVector_Intrinsics_vec256 f12; - Lib_IntVector_Intrinsics_vec256 f13; - Lib_IntVector_Intrinsics_vec256 f14; - Lib_IntVector_Intrinsics_vec256 a0; - Lib_IntVector_Intrinsics_vec256 a1; - Lib_IntVector_Intrinsics_vec256 a2; - Lib_IntVector_Intrinsics_vec256 a3; - Lib_IntVector_Intrinsics_vec256 a4; - Lib_IntVector_Intrinsics_vec256 a01; - Lib_IntVector_Intrinsics_vec256 a11; - Lib_IntVector_Intrinsics_vec256 a21; - Lib_IntVector_Intrinsics_vec256 a31; - Lib_IntVector_Intrinsics_vec256 a41; - Lib_IntVector_Intrinsics_vec256 a02; - Lib_IntVector_Intrinsics_vec256 a12; - Lib_IntVector_Intrinsics_vec256 a22; - Lib_IntVector_Intrinsics_vec256 a32; - Lib_IntVector_Intrinsics_vec256 a42; - Lib_IntVector_Intrinsics_vec256 a03; - Lib_IntVector_Intrinsics_vec256 a13; - Lib_IntVector_Intrinsics_vec256 a23; - Lib_IntVector_Intrinsics_vec256 a33; - Lib_IntVector_Intrinsics_vec256 a43; - Lib_IntVector_Intrinsics_vec256 a04; - Lib_IntVector_Intrinsics_vec256 a14; - Lib_IntVector_Intrinsics_vec256 a24; - Lib_IntVector_Intrinsics_vec256 a34; - Lib_IntVector_Intrinsics_vec256 a44; - Lib_IntVector_Intrinsics_vec256 a05; - Lib_IntVector_Intrinsics_vec256 a15; - Lib_IntVector_Intrinsics_vec256 a25; - Lib_IntVector_Intrinsics_vec256 a35; - Lib_IntVector_Intrinsics_vec256 a45; - Lib_IntVector_Intrinsics_vec256 a06; - Lib_IntVector_Intrinsics_vec256 a16; - Lib_IntVector_Intrinsics_vec256 a26; - Lib_IntVector_Intrinsics_vec256 a36; - Lib_IntVector_Intrinsics_vec256 a46; - Lib_IntVector_Intrinsics_vec256 t0; - Lib_IntVector_Intrinsics_vec256 t1; - Lib_IntVector_Intrinsics_vec256 t2; - Lib_IntVector_Intrinsics_vec256 t3; - Lib_IntVector_Intrinsics_vec256 t4; - Lib_IntVector_Intrinsics_vec256 mask26; - Lib_IntVector_Intrinsics_vec256 z0; - Lib_IntVector_Intrinsics_vec256 z1; - Lib_IntVector_Intrinsics_vec256 x0; - Lib_IntVector_Intrinsics_vec256 x3; - Lib_IntVector_Intrinsics_vec256 x1; - Lib_IntVector_Intrinsics_vec256 x4; - Lib_IntVector_Intrinsics_vec256 z01; - Lib_IntVector_Intrinsics_vec256 z11; - Lib_IntVector_Intrinsics_vec256 t; - Lib_IntVector_Intrinsics_vec256 z12; - Lib_IntVector_Intrinsics_vec256 x11; - Lib_IntVector_Intrinsics_vec256 x41; - Lib_IntVector_Intrinsics_vec256 x2; - Lib_IntVector_Intrinsics_vec256 x01; 
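The long run of bare declarations above (`uint64_t b;` through `vec256 o4;`) is not hand-written style: the `dist/c89-compatible` tree being deleted here was generated with every local hoisted to the top of its enclosing block, since C89 forbids declarations mixed with statements. A minimal before/after illustration (hypothetical function, not from the library):

```c
#include <stdint.h>

/* C99-style: declarations may follow statements. */
static uint64_t sum_c99(const uint64_t *a)
{
  uint64_t s = a[0];
  s += a[1];
  uint64_t t = a[2];   /* fine in C99, illegal in C89 */
  return s + t;
}

/* C89-style, as emitted throughout this tree: declare first, assign later. */
static uint64_t sum_c89(const uint64_t *a)
{
  uint64_t s;
  uint64_t t;
  s = a[0];
  s += a[1];
  t = a[2];
  return s + t;
}
```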
- Lib_IntVector_Intrinsics_vec256 z02; - Lib_IntVector_Intrinsics_vec256 z13; - Lib_IntVector_Intrinsics_vec256 x21; - Lib_IntVector_Intrinsics_vec256 x02; - Lib_IntVector_Intrinsics_vec256 x31; - Lib_IntVector_Intrinsics_vec256 x12; - Lib_IntVector_Intrinsics_vec256 z03; - Lib_IntVector_Intrinsics_vec256 x32; - Lib_IntVector_Intrinsics_vec256 x42; - Lib_IntVector_Intrinsics_vec256 o0; - Lib_IntVector_Intrinsics_vec256 o1; - Lib_IntVector_Intrinsics_vec256 o2; - Lib_IntVector_Intrinsics_vec256 o3; - Lib_IntVector_Intrinsics_vec256 o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec256_load64(b); - f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask); - r1 = pre; - r5 = pre + (uint32_t)5U; - r0 = r1[0U]; - r11 = r1[1U]; - r2 = r1[2U]; - r3 = r1[3U]; - r4 = r1[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13); - a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec256_mul64(r11, a01); - a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a11)); - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a21)); - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a31)); - a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r11, a31)); - a06 = - Lib_IntVector_Intrinsics_vec256_add64(a05, - Lib_IntVector_Intrinsics_vec256_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec256_add64(a15, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec256_add64(a25, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a41)); - a36 = - 
Lib_IntVector_Intrinsics_vec256_add64(a35, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec256_add64(a45, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a41)); - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - return; - } - } -} - -static inline void -poly1305_do_256( - uint8_t *k, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *out -) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t block[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 *pre; - Lib_IntVector_Intrinsics_vec256 *acc; - Hacl_Poly1305_256_poly1305_init(ctx, k); - if (aadlen != (uint32_t)0U) - { - poly1305_padded_256(ctx, aadlen, aad); - } - if (mlen != (uint32_t)0U) - { - poly1305_padded_256(ctx, mlen, m); - } - store64_le(block, (uint64_t)aadlen); - store64_le(block + (uint32_t)8U, (uint64_t)mlen); - pre = ctx + (uint32_t)5U; - acc = ctx; - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo); - Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi); - Lib_IntVector_Intrinsics_vec256 - f010 = - Lib_IntVector_Intrinsics_vec256_and(f0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f110 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f20 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)52U), - 
Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec256 - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 f01 = f010; - Lib_IntVector_Intrinsics_vec256 f111 = f110; - Lib_IntVector_Intrinsics_vec256 f2 = f20; - Lib_IntVector_Intrinsics_vec256 f3 = f30; - Lib_IntVector_Intrinsics_vec256 f41 = f40; - uint64_t b; - Lib_IntVector_Intrinsics_vec256 mask; - Lib_IntVector_Intrinsics_vec256 f4; - Lib_IntVector_Intrinsics_vec256 *r; - Lib_IntVector_Intrinsics_vec256 *r5; - Lib_IntVector_Intrinsics_vec256 r0; - Lib_IntVector_Intrinsics_vec256 r1; - Lib_IntVector_Intrinsics_vec256 r2; - Lib_IntVector_Intrinsics_vec256 r3; - Lib_IntVector_Intrinsics_vec256 r4; - Lib_IntVector_Intrinsics_vec256 r51; - Lib_IntVector_Intrinsics_vec256 r52; - Lib_IntVector_Intrinsics_vec256 r53; - Lib_IntVector_Intrinsics_vec256 r54; - Lib_IntVector_Intrinsics_vec256 f10; - Lib_IntVector_Intrinsics_vec256 f11; - Lib_IntVector_Intrinsics_vec256 f12; - Lib_IntVector_Intrinsics_vec256 f13; - Lib_IntVector_Intrinsics_vec256 f14; - Lib_IntVector_Intrinsics_vec256 a0; - Lib_IntVector_Intrinsics_vec256 a1; - Lib_IntVector_Intrinsics_vec256 a2; - Lib_IntVector_Intrinsics_vec256 a3; - Lib_IntVector_Intrinsics_vec256 a4; - Lib_IntVector_Intrinsics_vec256 a01; - Lib_IntVector_Intrinsics_vec256 a11; - Lib_IntVector_Intrinsics_vec256 a21; - Lib_IntVector_Intrinsics_vec256 a31; - Lib_IntVector_Intrinsics_vec256 a41; - Lib_IntVector_Intrinsics_vec256 a02; - Lib_IntVector_Intrinsics_vec256 a12; - Lib_IntVector_Intrinsics_vec256 a22; - Lib_IntVector_Intrinsics_vec256 a32; - Lib_IntVector_Intrinsics_vec256 a42; - Lib_IntVector_Intrinsics_vec256 a03; - Lib_IntVector_Intrinsics_vec256 a13; - Lib_IntVector_Intrinsics_vec256 a23; - Lib_IntVector_Intrinsics_vec256 a33; - Lib_IntVector_Intrinsics_vec256 a43; - Lib_IntVector_Intrinsics_vec256 a04; - Lib_IntVector_Intrinsics_vec256 a14; - Lib_IntVector_Intrinsics_vec256 a24; - Lib_IntVector_Intrinsics_vec256 a34; - Lib_IntVector_Intrinsics_vec256 a44; - Lib_IntVector_Intrinsics_vec256 a05; - Lib_IntVector_Intrinsics_vec256 a15; - Lib_IntVector_Intrinsics_vec256 a25; - Lib_IntVector_Intrinsics_vec256 a35; - Lib_IntVector_Intrinsics_vec256 a45; - Lib_IntVector_Intrinsics_vec256 a06; - Lib_IntVector_Intrinsics_vec256 a16; - Lib_IntVector_Intrinsics_vec256 a26; - Lib_IntVector_Intrinsics_vec256 a36; - Lib_IntVector_Intrinsics_vec256 a46; - Lib_IntVector_Intrinsics_vec256 t0; - Lib_IntVector_Intrinsics_vec256 t1; - Lib_IntVector_Intrinsics_vec256 t2; - Lib_IntVector_Intrinsics_vec256 t3; - Lib_IntVector_Intrinsics_vec256 t4; - Lib_IntVector_Intrinsics_vec256 mask26; - Lib_IntVector_Intrinsics_vec256 z0; - Lib_IntVector_Intrinsics_vec256 z1; - Lib_IntVector_Intrinsics_vec256 x0; - Lib_IntVector_Intrinsics_vec256 x3; - Lib_IntVector_Intrinsics_vec256 x1; - Lib_IntVector_Intrinsics_vec256 x4; - Lib_IntVector_Intrinsics_vec256 z01; - Lib_IntVector_Intrinsics_vec256 z11; - Lib_IntVector_Intrinsics_vec256 t; - Lib_IntVector_Intrinsics_vec256 z12; - Lib_IntVector_Intrinsics_vec256 x11; - Lib_IntVector_Intrinsics_vec256 x41; - Lib_IntVector_Intrinsics_vec256 x2; - Lib_IntVector_Intrinsics_vec256 x01; - 
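`poly1305_do_256` implements the RFC 8439 AEAD MAC framing: the AAD and the ciphertext are each absorbed zero-padded to a 16-byte boundary, and the final block holds the two lengths as little-endian 64-bit integers (the two `store64_le` calls into `block` above). A sketch of that last block, with `store64_le_sketch` standing in for the library's `store64_le`:

```c
#include <stdint.h>

static void store64_le_sketch(uint8_t *out, uint64_t v)
{
  int i;
  for (i = 0; i < 8; i++)
    out[i] = (uint8_t)(v >> (8U * (uint32_t)i));  /* little-endian bytes */
}

/* Final MAC block: le64(aadlen) || le64(mlen). */
static void aead_length_block(uint8_t block[16], uint32_t aadlen, uint32_t mlen)
{
  store64_le_sketch(block, (uint64_t)aadlen);
  store64_le_sketch(block + 8, (uint64_t)mlen);
}
```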
Lib_IntVector_Intrinsics_vec256 z02; - Lib_IntVector_Intrinsics_vec256 z13; - Lib_IntVector_Intrinsics_vec256 x21; - Lib_IntVector_Intrinsics_vec256 x02; - Lib_IntVector_Intrinsics_vec256 x31; - Lib_IntVector_Intrinsics_vec256 x12; - Lib_IntVector_Intrinsics_vec256 z03; - Lib_IntVector_Intrinsics_vec256 x32; - Lib_IntVector_Intrinsics_vec256 x42; - Lib_IntVector_Intrinsics_vec256 o0; - Lib_IntVector_Intrinsics_vec256 o1; - Lib_IntVector_Intrinsics_vec256 o2; - Lib_IntVector_Intrinsics_vec256 o3; - Lib_IntVector_Intrinsics_vec256 o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec256_load64(b); - f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask); - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13); - a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec256_mul64(r1, a01); - a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a11)); - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a21)); - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a31)); - a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a31)); - a06 = - Lib_IntVector_Intrinsics_vec256_add64(a05, - Lib_IntVector_Intrinsics_vec256_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec256_add64(a15, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec256_add64(a25, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a41)); - a36 = - 
Lib_IntVector_Intrinsics_vec256_add64(a35, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec256_add64(a45, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a41)); - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - Hacl_Poly1305_256_poly1305_finish(out, k, ctx); - } -} - -void -Hacl_Chacha20Poly1305_256_aead_encrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -) -{ - Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, (uint32_t)1U); - { - uint8_t tmp[64U] = { 0U }; - uint8_t *key; - Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U); - key = tmp; - poly1305_do_256(key, aadlen, aad, mlen, cipher, mac); - } -} - -uint32_t -Hacl_Chacha20Poly1305_256_aead_decrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -) -{ - uint8_t computed_mac[16U] = { 0U }; - uint8_t tmp[64U] = { 0U }; - uint8_t *key; - Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U); - key = tmp; - poly1305_do_256(key, aadlen, aad, mlen, cipher, computed_mac); - { - uint8_t res0 = (uint8_t)255U; - uint8_t z; - uint32_t res; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]); - res0 = uu____0 & res0;); - z = res0; - if (z == (uint8_t)255U) - { - Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, (uint32_t)1U); - res = (uint32_t)0U; - } - else - { - res = (uint32_t)1U; - } - return res; - } -} - diff --git a/dist/c89-compatible/Hacl_Chacha20Poly1305_256.h b/dist/c89-compatible/Hacl_Chacha20Poly1305_256.h deleted file mode 100644 index 27bd351500..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20Poly1305_256.h +++ /dev/null @@ 
-1,71 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Chacha20Poly1305_256_H -#define __Hacl_Chacha20Poly1305_256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Poly1305_256.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Chacha20_Vec256.h" -#include "evercrypt_targetconfig.h" -void -Hacl_Chacha20Poly1305_256_aead_encrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -); - -uint32_t -Hacl_Chacha20Poly1305_256_aead_decrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Chacha20Poly1305_256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Chacha20Poly1305_32.c b/dist/c89-compatible/Hacl_Chacha20Poly1305_32.c deleted file mode 100644 index 460b38c0b5..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20Poly1305_32.c +++ /dev/null @@ -1,809 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "Hacl_Chacha20Poly1305_32.h" - - - -static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text) -{ - uint32_t n = len / (uint32_t)16U; - uint32_t r = len % (uint32_t)16U; - uint8_t *blocks = text; - uint8_t *rem = text + n * (uint32_t)16U; - uint64_t *pre0 = ctx + (uint32_t)5U; - uint64_t *acc0 = ctx; - uint32_t nb = n * (uint32_t)16U / (uint32_t)16U; - uint32_t rem1 = n * (uint32_t)16U % (uint32_t)16U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *block = blocks + i * (uint32_t)16U; - uint64_t e[5U] = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - uint64_t f0 = lo; - uint64_t f1 = hi; - uint64_t f010 = f0 & (uint64_t)0x3ffffffU; - uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U; - uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - uint64_t f40 = f1 >> (uint32_t)40U; - uint64_t f01 = f010; - uint64_t f111 = f110; - uint64_t f2 = f20; - uint64_t f3 = f30; - uint64_t f41 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - uint64_t mask = b; - uint64_t f4 = e[4U]; - e[4U] = f4 | mask; - { - uint64_t *r1 = pre0; - uint64_t *r5 = pre0 + (uint32_t)5U; - uint64_t r0 = r1[0U]; - uint64_t r11 = r1[1U]; - uint64_t r2 = r1[2U]; - uint64_t r3 = r1[3U]; - uint64_t r4 = r1[4U]; - uint64_t r51 = r5[1U]; - uint64_t r52 = r5[2U]; - uint64_t r53 = r5[3U]; - uint64_t r54 = r5[4U]; - uint64_t f10 = e[0U]; - uint64_t f11 = e[1U]; - uint64_t f12 = e[2U]; - uint64_t f13 = e[3U]; - uint64_t f14 = e[4U]; - uint64_t a0 = acc0[0U]; - uint64_t a1 = acc0[1U]; - uint64_t a2 = acc0[2U]; - uint64_t a3 = acc0[3U]; - uint64_t a4 = acc0[4U]; - uint64_t a01 = a0 + f10; - uint64_t a11 = a1 + f11; - uint64_t a21 = a2 + f12; - uint64_t a31 = a3 + f13; - uint64_t a41 = a4 + f14; - uint64_t a02 = r0 * a01; - uint64_t a12 = r11 * a01; - uint64_t a22 = r2 * a01; - uint64_t a32 = r3 * a01; - uint64_t a42 = r4 * a01; - uint64_t a03 = a02 + r54 * a11; - uint64_t a13 = a12 + r0 * a11; - uint64_t a23 = a22 + r11 * a11; - uint64_t a33 = a32 + r2 * a11; - uint64_t a43 = a42 + r3 * a11; - uint64_t a04 = a03 + r53 * a21; - uint64_t a14 = a13 + r54 * a21; - uint64_t a24 = a23 + r0 * a21; - uint64_t a34 = a33 + r11 * a21; - uint64_t a44 = a43 + r2 * a21; - uint64_t a05 = a04 + r52 * a31; - uint64_t a15 = a14 + r53 * a31; - uint64_t a25 = a24 + r54 * a31; - uint64_t a35 = a34 + r0 * a31; - uint64_t a45 = a44 + r11 * a31; - uint64_t a06 = a05 + r51 * a41; - uint64_t a16 = a15 + r52 * a41; - uint64_t a26 = a25 + r53 * a41; - uint64_t a36 = a35 + r54 * a41; - uint64_t a46 = a45 + r0 * a41; - uint64_t t0 = a06; - uint64_t t1 = a16; - uint64_t t2 = a26; - uint64_t t3 = a36; - uint64_t t4 = a46; - uint64_t mask26 = (uint64_t)0x3ffffffU; - uint64_t z0 = t0 >> (uint32_t)26U; - uint64_t z1 = t3 >> (uint32_t)26U; - uint64_t x0 = t0 & mask26; - uint64_t x3 = t3 & mask26; - uint64_t x1 = t1 + z0; - uint64_t x4 = t4 + z1; - uint64_t z01 = x1 >> (uint32_t)26U; - uint64_t z11 = x4 >> (uint32_t)26U; - uint64_t t = z11 << (uint32_t)2U; - uint64_t z12 = z11 + t; - uint64_t x11 = x1 & mask26; - uint64_t x41 = x4 & mask26; - uint64_t x2 = t2 + z01; - uint64_t x01 = x0 + z12; - uint64_t z02 = x2 >> (uint32_t)26U; - uint64_t z13 = x01 >> (uint32_t)26U; - uint64_t x21 = x2 & mask26; - uint64_t x02 = x01 & mask26; - uint64_t x31 = x3 + z02; - 
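In the `aead_encrypt`/`aead_decrypt` pair earlier in this hunk (and again in this 32-bit file), the one-time Poly1305 key is the first 32 bytes of the ChaCha20 keystream at counter 0 (the encryption of the zeroed `tmp[64]` buffer), while the payload itself is encrypted starting at counter 1. Decryption recomputes the tag over the ciphertext and compares it to the received tag without data-dependent branches via `FStar_UInt8_eq_mask`, releasing plaintext only on a match. A branch-free comparison sketch, with `eq_mask_u8` as a hypothetical stand-in for `FStar_UInt8_eq_mask`:

```c
#include <stdint.h>

/* 0xFF iff x == y, computed without a data-dependent branch. */
static uint8_t eq_mask_u8(uint8_t x, uint8_t y)
{
  uint16_t d = (uint16_t)(x ^ y);
  return (uint8_t)(((d - 1U) >> 8) & 0xFFU);  /* 0xFF iff d == 0 */
}

/* Constant-time 16-byte tag check; returns 0 on match, 1 on mismatch,
   matching the aead_decrypt return convention above. */
static uint32_t verify_tag_16(const uint8_t *computed, const uint8_t *received)
{
  uint8_t acc = 0xFFU;
  int i;
  for (i = 0; i < 16; i++)
    acc &= eq_mask_u8(computed[i], received[i]);  /* fold over all bytes */
  return acc == 0xFFU ? 0U : 1U;
}
```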
uint64_t x12 = x11 + z13; - uint64_t z03 = x31 >> (uint32_t)26U; - uint64_t x32 = x31 & mask26; - uint64_t x42 = x41 + z03; - uint64_t o0 = x02; - uint64_t o1 = x12; - uint64_t o2 = x21; - uint64_t o3 = x32; - uint64_t o4 = x42; - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *last = blocks + nb * (uint32_t)16U; - uint64_t e[5U] = { 0U }; - uint8_t tmp[16U] = { 0U }; - memcpy(tmp, last, rem1 * sizeof (uint8_t)); - { - uint64_t u0 = load64_le(tmp); - uint64_t lo = u0; - uint64_t u = load64_le(tmp + (uint32_t)8U); - uint64_t hi = u; - uint64_t f0 = lo; - uint64_t f1 = hi; - uint64_t f010 = f0 & (uint64_t)0x3ffffffU; - uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U; - uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - uint64_t f40 = f1 >> (uint32_t)40U; - uint64_t f01 = f010; - uint64_t f111 = f110; - uint64_t f2 = f20; - uint64_t f3 = f30; - uint64_t f4 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f4; - { - uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U; - uint64_t mask = b; - uint64_t fi = e[rem1 * (uint32_t)8U / (uint32_t)26U]; - e[rem1 * (uint32_t)8U / (uint32_t)26U] = fi | mask; - { - uint64_t *r1 = pre0; - uint64_t *r5 = pre0 + (uint32_t)5U; - uint64_t r0 = r1[0U]; - uint64_t r11 = r1[1U]; - uint64_t r2 = r1[2U]; - uint64_t r3 = r1[3U]; - uint64_t r4 = r1[4U]; - uint64_t r51 = r5[1U]; - uint64_t r52 = r5[2U]; - uint64_t r53 = r5[3U]; - uint64_t r54 = r5[4U]; - uint64_t f10 = e[0U]; - uint64_t f11 = e[1U]; - uint64_t f12 = e[2U]; - uint64_t f13 = e[3U]; - uint64_t f14 = e[4U]; - uint64_t a0 = acc0[0U]; - uint64_t a1 = acc0[1U]; - uint64_t a2 = acc0[2U]; - uint64_t a3 = acc0[3U]; - uint64_t a4 = acc0[4U]; - uint64_t a01 = a0 + f10; - uint64_t a11 = a1 + f11; - uint64_t a21 = a2 + f12; - uint64_t a31 = a3 + f13; - uint64_t a41 = a4 + f14; - uint64_t a02 = r0 * a01; - uint64_t a12 = r11 * a01; - uint64_t a22 = r2 * a01; - uint64_t a32 = r3 * a01; - uint64_t a42 = r4 * a01; - uint64_t a03 = a02 + r54 * a11; - uint64_t a13 = a12 + r0 * a11; - uint64_t a23 = a22 + r11 * a11; - uint64_t a33 = a32 + r2 * a11; - uint64_t a43 = a42 + r3 * a11; - uint64_t a04 = a03 + r53 * a21; - uint64_t a14 = a13 + r54 * a21; - uint64_t a24 = a23 + r0 * a21; - uint64_t a34 = a33 + r11 * a21; - uint64_t a44 = a43 + r2 * a21; - uint64_t a05 = a04 + r52 * a31; - uint64_t a15 = a14 + r53 * a31; - uint64_t a25 = a24 + r54 * a31; - uint64_t a35 = a34 + r0 * a31; - uint64_t a45 = a44 + r11 * a31; - uint64_t a06 = a05 + r51 * a41; - uint64_t a16 = a15 + r52 * a41; - uint64_t a26 = a25 + r53 * a41; - uint64_t a36 = a35 + r54 * a41; - uint64_t a46 = a45 + r0 * a41; - uint64_t t0 = a06; - uint64_t t1 = a16; - uint64_t t2 = a26; - uint64_t t3 = a36; - uint64_t t4 = a46; - uint64_t mask26 = (uint64_t)0x3ffffffU; - uint64_t z0 = t0 >> (uint32_t)26U; - uint64_t z1 = t3 >> (uint32_t)26U; - uint64_t x0 = t0 & mask26; - uint64_t x3 = t3 & mask26; - uint64_t x1 = t1 + z0; - uint64_t x4 = t4 + z1; - uint64_t z01 = x1 >> (uint32_t)26U; - uint64_t z11 = x4 >> (uint32_t)26U; - uint64_t t = z11 << (uint32_t)2U; - uint64_t z12 = z11 + t; - uint64_t x11 = x1 & mask26; - uint64_t x41 = x4 & mask26; - uint64_t x2 = t2 + z01; - uint64_t x01 = x0 + z12; - uint64_t z02 = x2 >> (uint32_t)26U; - uint64_t z13 = x01 >> (uint32_t)26U; - uint64_t x21 = x2 & mask26; - uint64_t x02 = x01 & mask26; - uint64_t 
x31 = x3 + z02; - uint64_t x12 = x11 + z13; - uint64_t z03 = x31 >> (uint32_t)26U; - uint64_t x32 = x31 & mask26; - uint64_t x42 = x41 + z03; - uint64_t o0 = x02; - uint64_t o1 = x12; - uint64_t o2 = x21; - uint64_t o3 = x32; - uint64_t o4 = x42; - acc0[0U] = o0; - acc0[1U] = o1; - acc0[2U] = o2; - acc0[3U] = o3; - acc0[4U] = o4; - } - } - } - } - { - uint8_t tmp[16U] = { 0U }; - memcpy(tmp, rem, r * sizeof (uint8_t)); - if (r > (uint32_t)0U) - { - uint64_t *pre = ctx + (uint32_t)5U; - uint64_t *acc = ctx; - uint64_t e[5U] = { 0U }; - uint64_t u0 = load64_le(tmp); - uint64_t lo = u0; - uint64_t u = load64_le(tmp + (uint32_t)8U); - uint64_t hi = u; - uint64_t f0 = lo; - uint64_t f1 = hi; - uint64_t f010 = f0 & (uint64_t)0x3ffffffU; - uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U; - uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - uint64_t f40 = f1 >> (uint32_t)40U; - uint64_t f01 = f010; - uint64_t f111 = f110; - uint64_t f2 = f20; - uint64_t f3 = f30; - uint64_t f41 = f40; - uint64_t b; - uint64_t mask; - uint64_t f4; - uint64_t *r1; - uint64_t *r5; - uint64_t r0; - uint64_t r11; - uint64_t r2; - uint64_t r3; - uint64_t r4; - uint64_t r51; - uint64_t r52; - uint64_t r53; - uint64_t r54; - uint64_t f10; - uint64_t f11; - uint64_t f12; - uint64_t f13; - uint64_t f14; - uint64_t a0; - uint64_t a1; - uint64_t a2; - uint64_t a3; - uint64_t a4; - uint64_t a01; - uint64_t a11; - uint64_t a21; - uint64_t a31; - uint64_t a41; - uint64_t a02; - uint64_t a12; - uint64_t a22; - uint64_t a32; - uint64_t a42; - uint64_t a03; - uint64_t a13; - uint64_t a23; - uint64_t a33; - uint64_t a43; - uint64_t a04; - uint64_t a14; - uint64_t a24; - uint64_t a34; - uint64_t a44; - uint64_t a05; - uint64_t a15; - uint64_t a25; - uint64_t a35; - uint64_t a45; - uint64_t a06; - uint64_t a16; - uint64_t a26; - uint64_t a36; - uint64_t a46; - uint64_t t0; - uint64_t t1; - uint64_t t2; - uint64_t t3; - uint64_t t4; - uint64_t mask26; - uint64_t z0; - uint64_t z1; - uint64_t x0; - uint64_t x3; - uint64_t x1; - uint64_t x4; - uint64_t z01; - uint64_t z11; - uint64_t t; - uint64_t z12; - uint64_t x11; - uint64_t x41; - uint64_t x2; - uint64_t x01; - uint64_t z02; - uint64_t z13; - uint64_t x21; - uint64_t x02; - uint64_t x31; - uint64_t x12; - uint64_t z03; - uint64_t x32; - uint64_t x42; - uint64_t o0; - uint64_t o1; - uint64_t o2; - uint64_t o3; - uint64_t o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = b; - f4 = e[4U]; - e[4U] = f4 | mask; - r1 = pre; - r5 = pre + (uint32_t)5U; - r0 = r1[0U]; - r11 = r1[1U]; - r2 = r1[2U]; - r3 = r1[3U]; - r4 = r1[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = a0 + f10; - a11 = a1 + f11; - a21 = a2 + f12; - a31 = a3 + f13; - a41 = a4 + f14; - a02 = r0 * a01; - a12 = r11 * a01; - a22 = r2 * a01; - a32 = r3 * a01; - a42 = r4 * a01; - a03 = a02 + r54 * a11; - a13 = a12 + r0 * a11; - a23 = a22 + r11 * a11; - a33 = a32 + r2 * a11; - a43 = a42 + r3 * a11; - a04 = a03 + r53 * a21; - a14 = a13 + r54 * a21; - a24 = a23 + r0 * a21; - a34 = a33 + r11 * a21; - a44 = a43 + r2 * a21; - a05 = a04 + r52 * a31; - a15 = a14 + r53 * a31; - a25 = a24 + r54 * a31; - a35 = a34 + r0 * a31; - a45 = a44 + r11 * a31; - a06 = a05 + r51 * a41; - a16 = a15 + r52 * 
a41; - a26 = a25 + r53 * a41; - a36 = a35 + r54 * a41; - a46 = a45 + r0 * a41; - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = (uint64_t)0x3ffffffU; - z0 = t0 >> (uint32_t)26U; - z1 = t3 >> (uint32_t)26U; - x0 = t0 & mask26; - x3 = t3 & mask26; - x1 = t1 + z0; - x4 = t4 + z1; - z01 = x1 >> (uint32_t)26U; - z11 = x4 >> (uint32_t)26U; - t = z11 << (uint32_t)2U; - z12 = z11 + t; - x11 = x1 & mask26; - x41 = x4 & mask26; - x2 = t2 + z01; - x01 = x0 + z12; - z02 = x2 >> (uint32_t)26U; - z13 = x01 >> (uint32_t)26U; - x21 = x2 & mask26; - x02 = x01 & mask26; - x31 = x3 + z02; - x12 = x11 + z13; - z03 = x31 >> (uint32_t)26U; - x32 = x31 & mask26; - x42 = x41 + z03; - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - return; - } - } -} - -static inline void -poly1305_do_32( - uint8_t *k, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *out -) -{ - uint64_t ctx[25U] = { 0U }; - uint8_t block[16U] = { 0U }; - uint64_t *pre; - uint64_t *acc; - Hacl_Poly1305_32_poly1305_init(ctx, k); - if (aadlen != (uint32_t)0U) - { - poly1305_padded_32(ctx, aadlen, aad); - } - if (mlen != (uint32_t)0U) - { - poly1305_padded_32(ctx, mlen, m); - } - store64_le(block, (uint64_t)aadlen); - store64_le(block + (uint32_t)8U, (uint64_t)mlen); - pre = ctx + (uint32_t)5U; - acc = ctx; - { - uint64_t e[5U] = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - uint64_t f0 = lo; - uint64_t f1 = hi; - uint64_t f010 = f0 & (uint64_t)0x3ffffffU; - uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U; - uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - uint64_t f40 = f1 >> (uint32_t)40U; - uint64_t f01 = f010; - uint64_t f111 = f110; - uint64_t f2 = f20; - uint64_t f3 = f30; - uint64_t f41 = f40; - uint64_t b; - uint64_t mask; - uint64_t f4; - uint64_t *r; - uint64_t *r5; - uint64_t r0; - uint64_t r1; - uint64_t r2; - uint64_t r3; - uint64_t r4; - uint64_t r51; - uint64_t r52; - uint64_t r53; - uint64_t r54; - uint64_t f10; - uint64_t f11; - uint64_t f12; - uint64_t f13; - uint64_t f14; - uint64_t a0; - uint64_t a1; - uint64_t a2; - uint64_t a3; - uint64_t a4; - uint64_t a01; - uint64_t a11; - uint64_t a21; - uint64_t a31; - uint64_t a41; - uint64_t a02; - uint64_t a12; - uint64_t a22; - uint64_t a32; - uint64_t a42; - uint64_t a03; - uint64_t a13; - uint64_t a23; - uint64_t a33; - uint64_t a43; - uint64_t a04; - uint64_t a14; - uint64_t a24; - uint64_t a34; - uint64_t a44; - uint64_t a05; - uint64_t a15; - uint64_t a25; - uint64_t a35; - uint64_t a45; - uint64_t a06; - uint64_t a16; - uint64_t a26; - uint64_t a36; - uint64_t a46; - uint64_t t0; - uint64_t t1; - uint64_t t2; - uint64_t t3; - uint64_t t4; - uint64_t mask26; - uint64_t z0; - uint64_t z1; - uint64_t x0; - uint64_t x3; - uint64_t x1; - uint64_t x4; - uint64_t z01; - uint64_t z11; - uint64_t t; - uint64_t z12; - uint64_t x11; - uint64_t x41; - uint64_t x2; - uint64_t x01; - uint64_t z02; - uint64_t z13; - uint64_t x21; - uint64_t x02; - uint64_t x31; - uint64_t x12; - uint64_t z03; - uint64_t x32; - uint64_t x42; - uint64_t o0; - uint64_t o1; - uint64_t o2; - uint64_t o3; - uint64_t o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = b; - f4 = e[4U]; - e[4U] = f4 | mask; - r = pre; - r5 = pre + 
(uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = a0 + f10; - a11 = a1 + f11; - a21 = a2 + f12; - a31 = a3 + f13; - a41 = a4 + f14; - a02 = r0 * a01; - a12 = r1 * a01; - a22 = r2 * a01; - a32 = r3 * a01; - a42 = r4 * a01; - a03 = a02 + r54 * a11; - a13 = a12 + r0 * a11; - a23 = a22 + r1 * a11; - a33 = a32 + r2 * a11; - a43 = a42 + r3 * a11; - a04 = a03 + r53 * a21; - a14 = a13 + r54 * a21; - a24 = a23 + r0 * a21; - a34 = a33 + r1 * a21; - a44 = a43 + r2 * a21; - a05 = a04 + r52 * a31; - a15 = a14 + r53 * a31; - a25 = a24 + r54 * a31; - a35 = a34 + r0 * a31; - a45 = a44 + r1 * a31; - a06 = a05 + r51 * a41; - a16 = a15 + r52 * a41; - a26 = a25 + r53 * a41; - a36 = a35 + r54 * a41; - a46 = a45 + r0 * a41; - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = (uint64_t)0x3ffffffU; - z0 = t0 >> (uint32_t)26U; - z1 = t3 >> (uint32_t)26U; - x0 = t0 & mask26; - x3 = t3 & mask26; - x1 = t1 + z0; - x4 = t4 + z1; - z01 = x1 >> (uint32_t)26U; - z11 = x4 >> (uint32_t)26U; - t = z11 << (uint32_t)2U; - z12 = z11 + t; - x11 = x1 & mask26; - x41 = x4 & mask26; - x2 = t2 + z01; - x01 = x0 + z12; - z02 = x2 >> (uint32_t)26U; - z13 = x01 >> (uint32_t)26U; - x21 = x2 & mask26; - x02 = x01 & mask26; - x31 = x3 + z02; - x12 = x11 + z13; - z03 = x31 >> (uint32_t)26U; - x32 = x31 & mask26; - x42 = x41 + z03; - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - Hacl_Poly1305_32_poly1305_finish(out, k, ctx); - } -} - -void -Hacl_Chacha20Poly1305_32_aead_encrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -) -{ - Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, (uint32_t)1U); - { - uint8_t tmp[64U] = { 0U }; - uint8_t *key; - Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U); - key = tmp; - poly1305_do_32(key, aadlen, aad, mlen, cipher, mac); - } -} - -uint32_t -Hacl_Chacha20Poly1305_32_aead_decrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -) -{ - uint8_t computed_mac[16U] = { 0U }; - uint8_t tmp[64U] = { 0U }; - uint8_t *key; - Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U); - key = tmp; - poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac); - { - uint8_t res0 = (uint8_t)255U; - uint8_t z; - uint32_t res; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]); - res0 = uu____0 & res0;); - z = res0; - if (z == (uint8_t)255U) - { - Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, (uint32_t)1U); - res = (uint32_t)0U; - } - else - { - res = (uint32_t)1U; - } - return res; - } -} - diff --git a/dist/c89-compatible/Hacl_Chacha20Poly1305_32.h b/dist/c89-compatible/Hacl_Chacha20Poly1305_32.h deleted file mode 100644 index 161d026580..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20Poly1305_32.h +++ /dev/null @@ -1,71 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation 
files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Chacha20Poly1305_32_H -#define __Hacl_Chacha20Poly1305_32_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Poly1305_32.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Chacha20.h" -#include "evercrypt_targetconfig.h" -void -Hacl_Chacha20Poly1305_32_aead_encrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -); - -uint32_t -Hacl_Chacha20Poly1305_32_aead_decrypt( - uint8_t *k, - uint8_t *n, - uint32_t aadlen, - uint8_t *aad, - uint32_t mlen, - uint8_t *m, - uint8_t *cipher, - uint8_t *mac -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Chacha20Poly1305_32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Chacha20_Vec128.c b/dist/c89-compatible/Hacl_Chacha20_Vec128.c deleted file mode 100644 index d5b73af8fd..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20_Vec128.c +++ /dev/null @@ -1,880 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "Hacl_Chacha20_Vec128.h" - -#include "internal/Hacl_Chacha20.h" -#include "libintvector.h" -static inline void double_round_128(Lib_IntVector_Intrinsics_vec128 *st) -{ - Lib_IntVector_Intrinsics_vec128 std0; - Lib_IntVector_Intrinsics_vec128 std1; - Lib_IntVector_Intrinsics_vec128 std2; - Lib_IntVector_Intrinsics_vec128 std3; - Lib_IntVector_Intrinsics_vec128 std4; - Lib_IntVector_Intrinsics_vec128 std5; - Lib_IntVector_Intrinsics_vec128 std6; - Lib_IntVector_Intrinsics_vec128 std7; - Lib_IntVector_Intrinsics_vec128 std8; - Lib_IntVector_Intrinsics_vec128 std9; - Lib_IntVector_Intrinsics_vec128 std10; - Lib_IntVector_Intrinsics_vec128 std11; - Lib_IntVector_Intrinsics_vec128 std12; - Lib_IntVector_Intrinsics_vec128 std13; - Lib_IntVector_Intrinsics_vec128 std14; - Lib_IntVector_Intrinsics_vec128 std15; - Lib_IntVector_Intrinsics_vec128 std16; - Lib_IntVector_Intrinsics_vec128 std17; - Lib_IntVector_Intrinsics_vec128 std18; - Lib_IntVector_Intrinsics_vec128 std19; - Lib_IntVector_Intrinsics_vec128 std20; - Lib_IntVector_Intrinsics_vec128 std21; - Lib_IntVector_Intrinsics_vec128 std22; - Lib_IntVector_Intrinsics_vec128 std23; - Lib_IntVector_Intrinsics_vec128 std24; - Lib_IntVector_Intrinsics_vec128 std25; - Lib_IntVector_Intrinsics_vec128 std26; - Lib_IntVector_Intrinsics_vec128 std27; - Lib_IntVector_Intrinsics_vec128 std28; - Lib_IntVector_Intrinsics_vec128 std29; - Lib_IntVector_Intrinsics_vec128 std30; - Lib_IntVector_Intrinsics_vec128 std; - st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]); - std0 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]); - st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, (uint32_t)16U); - st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]); - std1 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]); - st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, (uint32_t)12U); - st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]); - std2 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]); - st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, (uint32_t)8U); - st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]); - std3 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]); - st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, (uint32_t)7U); - st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]); - std4 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]); - st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, (uint32_t)16U); - st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]); - std5 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]); - st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, (uint32_t)12U); - st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]); - std6 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]); - st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, (uint32_t)8U); - st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]); - std7 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]); - st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, (uint32_t)7U); - st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]); - std8 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]); - st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, (uint32_t)16U); - st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]); - std9 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]); - st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, 
(uint32_t)12U); - st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]); - std10 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]); - st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, (uint32_t)8U); - st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]); - std11 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]); - st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, (uint32_t)7U); - st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]); - std12 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]); - st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, (uint32_t)16U); - st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]); - std13 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]); - st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, (uint32_t)12U); - st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]); - std14 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]); - st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, (uint32_t)8U); - st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]); - std15 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]); - st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, (uint32_t)7U); - st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]); - std16 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]); - st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, (uint32_t)16U); - st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]); - std17 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]); - st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, (uint32_t)12U); - st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]); - std18 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]); - st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, (uint32_t)8U); - st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]); - std19 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]); - st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, (uint32_t)7U); - st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]); - std20 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]); - st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, (uint32_t)16U); - st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]); - std21 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]); - st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, (uint32_t)12U); - st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]); - std22 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]); - st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, (uint32_t)8U); - st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]); - std23 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]); - st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, (uint32_t)7U); - st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]); - std24 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]); - st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, (uint32_t)16U); - st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]); - std25 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]); - st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, (uint32_t)12U); - st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]); - std26 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]); - st[13U] = 
Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, (uint32_t)8U); - st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]); - std27 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]); - st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, (uint32_t)7U); - st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]); - std28 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]); - st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, (uint32_t)16U); - st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]); - std29 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]); - st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, (uint32_t)12U); - st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]); - std30 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]); - st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, (uint32_t)8U); - st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]); - std = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]); - st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, (uint32_t)7U); -} - -static inline void -chacha20_core_128( - Lib_IntVector_Intrinsics_vec128 *k, - Lib_IntVector_Intrinsics_vec128 *ctx, - uint32_t ctr -) -{ - uint32_t ctr_u32; - Lib_IntVector_Intrinsics_vec128 cv; - memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec128)); - ctr_u32 = (uint32_t)4U * ctr; - cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32); - k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv); - double_round_128(k); - double_round_128(k); - double_round_128(k); - double_round_128(k); - double_round_128(k); - double_round_128(k); - double_round_128(k); - double_round_128(k); - double_round_128(k); - double_round_128(k); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 *os = k; - Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]); - os[i] = x;); - k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv); -} - -static inline void -chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n, uint32_t ctr) -{ - uint32_t ctx1[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec128 ctr1; - Lib_IntVector_Intrinsics_vec128 c12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = ctx1; - uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i]; - os[i] = x;); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = ctx1 + (uint32_t)4U; - uint8_t *bj = k + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - ctx1[12U] = ctr; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint32_t *os = ctx1 + (uint32_t)13U; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 *os = ctx; - uint32_t x = ctx1[i]; - Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x); - os[i] = x0;); - ctr1 = - Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)0U, - (uint32_t)1U, - (uint32_t)2U, - (uint32_t)3U); - c12 = ctx[12U]; - ctx[12U] = Lib_IntVector_Intrinsics_vec128_add32(c12, ctr1); -} - -void -Hacl_Chacha20_Vec128_chacha20_encrypt_128( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - KRML_PRE_ALIGN(16) 
Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U }; - uint32_t rem; - uint32_t nb; - uint32_t rem1; - chacha20_init_128(ctx, key, n, ctr); - rem = len % (uint32_t)256U; - nb = len / (uint32_t)256U; - rem1 = len % (uint32_t)256U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *uu____0 = out + i * (uint32_t)256U; - uint8_t *uu____1 = text + i * (uint32_t)256U; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; - chacha20_core_128(k, ctx, i); - { - Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; - Lib_IntVector_Intrinsics_vec128 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec128 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec128 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec128 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec128 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec128 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec128 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec128 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec128 st9 = k[9U]; - Lib_IntVector_Intrinsics_vec128 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec128 st11 = k[11U]; - Lib_IntVector_Intrinsics_vec128 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec128 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec128 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec128 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec128 - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 v0__0 = v0__; - Lib_IntVector_Intrinsics_vec128 v2__0 = v2__; - Lib_IntVector_Intrinsics_vec128 v1__0 = v1__; - Lib_IntVector_Intrinsics_vec128 v3__0 = v3__; - Lib_IntVector_Intrinsics_vec128 v0 = v0__0; - Lib_IntVector_Intrinsics_vec128 v1 = v1__0; - Lib_IntVector_Intrinsics_vec128 v2 = v2__0; - Lib_IntVector_Intrinsics_vec128 v3 = v3__0; - Lib_IntVector_Intrinsics_vec128 - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 v0__2 = v0__1; - Lib_IntVector_Intrinsics_vec128 v2__2 = v2__1; - Lib_IntVector_Intrinsics_vec128 v1__2 = v1__1; - Lib_IntVector_Intrinsics_vec128 v3__2 = v3__1; - 
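For orientation, the double_round_128 at the top of this file is the textbook ChaCha20 double round lifted lane-wise onto vec128 values: four column quarter-rounds followed by four diagonal quarter-rounds with the familiar 16/12/8/7 rotation schedule, and chacha20_core_128 runs it ten times for the standard 20 rounds. The scalar shape it vectorizes:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned int s)
    {
      return (x << s) | (x >> (32U - s));
    }

    /* One ChaCha20 quarter-round on state indices (a, b, c, d); the
       vec128 code performs four of these at once, one per 32-bit lane. */
    static void quarter_round(uint32_t st[16], int a, int b, int c, int d)
    {
      st[a] += st[b]; st[d] ^= st[a]; st[d] = rotl32(st[d], 16);
      st[c] += st[d]; st[b] ^= st[c]; st[b] = rotl32(st[b], 12);
      st[a] += st[b]; st[d] ^= st[a]; st[d] = rotl32(st[d], 8);
      st[c] += st[d]; st[b] ^= st[c]; st[b] = rotl32(st[b], 7);
    }

    /* One double round: columns (0,4,8,12)..(3,7,11,15), then
       diagonals (0,5,10,15)..(3,4,9,14), matching double_round_128. */
    static void double_round(uint32_t st[16])
    {
      quarter_round(st, 0, 4, 8, 12);
      quarter_round(st, 1, 5, 9, 13);
      quarter_round(st, 2, 6, 10, 14);
      quarter_round(st, 3, 7, 11, 15);
      quarter_round(st, 0, 5, 10, 15);
      quarter_round(st, 1, 6, 11, 12);
      quarter_round(st, 2, 7, 8, 13);
      quarter_round(st, 3, 4, 9, 14);
    }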
Lib_IntVector_Intrinsics_vec128 v4 = v0__2; - Lib_IntVector_Intrinsics_vec128 v5 = v1__2; - Lib_IntVector_Intrinsics_vec128 v6 = v2__2; - Lib_IntVector_Intrinsics_vec128 v7 = v3__2; - Lib_IntVector_Intrinsics_vec128 - v0_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v1_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v2_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st10, st11); - Lib_IntVector_Intrinsics_vec128 - v3_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st10, st11); - Lib_IntVector_Intrinsics_vec128 - v0__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v1__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v2__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 - v3__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 v0__4 = v0__3; - Lib_IntVector_Intrinsics_vec128 v2__4 = v2__3; - Lib_IntVector_Intrinsics_vec128 v1__4 = v1__3; - Lib_IntVector_Intrinsics_vec128 v3__4 = v3__3; - Lib_IntVector_Intrinsics_vec128 v8 = v0__4; - Lib_IntVector_Intrinsics_vec128 v9 = v1__4; - Lib_IntVector_Intrinsics_vec128 v10 = v2__4; - Lib_IntVector_Intrinsics_vec128 v11 = v3__4; - Lib_IntVector_Intrinsics_vec128 - v0_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v1_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v2_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v3_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v0__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v1__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v2__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 - v3__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 v0__6 = v0__5; - Lib_IntVector_Intrinsics_vec128 v2__6 = v2__5; - Lib_IntVector_Intrinsics_vec128 v1__6 = v1__5; - Lib_IntVector_Intrinsics_vec128 v3__6 = v3__5; - Lib_IntVector_Intrinsics_vec128 v12 = v0__6; - Lib_IntVector_Intrinsics_vec128 v13 = v1__6; - Lib_IntVector_Intrinsics_vec128 v14 = v2__6; - Lib_IntVector_Intrinsics_vec128 v15 = v3__6; - k[0U] = v0; - k[1U] = v4; - k[2U] = v8; - k[3U] = v12; - k[4U] = v1; - k[5U] = v5; - k[6U] = v9; - k[7U] = v13; - k[8U] = v2; - k[9U] = v6; - k[10U] = v10; - k[11U] = v14; - k[12U] = v3; - k[13U] = v7; - k[14U] = v11; - k[15U] = v15; - KRML_MAYBE_FOR16(i0, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 - x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]); - Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y);); - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)256U; - uint8_t *uu____3 = text + nb * (uint32_t)256U; - uint8_t plain[256U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; - 
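The `if (rem1 > (uint32_t)0U)` branch entered here handles the tail: rem1, like rem, is just len % 256U (KaRaMeL's let-bindings survive compilation as two identical locals), and the partial block is routed through a zero-padded 256-byte stack buffer so the regular four-block step can be reused unchanged. Schematically, with process_256B as a hypothetical stand-in for the core_128 + transpose + XOR sequence, not a function in this file:

    /* Pad the last rem (< 256) bytes into a full buffer, run one
       regular 4-block step in place at block index nb, then copy only
       rem bytes back out; the zero padding is encrypted too, but it
       never reaches `out`. */
    if (rem > (uint32_t)0U)
    {
      uint8_t plain[256U] = { 0U };
      memcpy(plain, text + nb * (uint32_t)256U, rem * sizeof (uint8_t));
      process_256B(plain, plain, ctx, nb); /* hypothetical helper */
      memcpy(out + nb * (uint32_t)256U, plain, rem * sizeof (uint8_t));
    }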
chacha20_core_128(k, ctx, nb); - { - Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; - Lib_IntVector_Intrinsics_vec128 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec128 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec128 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec128 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec128 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec128 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec128 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec128 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec128 st9 = k[9U]; - Lib_IntVector_Intrinsics_vec128 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec128 st11 = k[11U]; - Lib_IntVector_Intrinsics_vec128 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec128 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec128 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec128 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec128 - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 v0__0 = v0__; - Lib_IntVector_Intrinsics_vec128 v2__0 = v2__; - Lib_IntVector_Intrinsics_vec128 v1__0 = v1__; - Lib_IntVector_Intrinsics_vec128 v3__0 = v3__; - Lib_IntVector_Intrinsics_vec128 v0 = v0__0; - Lib_IntVector_Intrinsics_vec128 v1 = v1__0; - Lib_IntVector_Intrinsics_vec128 v2 = v2__0; - Lib_IntVector_Intrinsics_vec128 v3 = v3__0; - Lib_IntVector_Intrinsics_vec128 - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 v0__2 = v0__1; - Lib_IntVector_Intrinsics_vec128 v2__2 = v2__1; - Lib_IntVector_Intrinsics_vec128 v1__2 = v1__1; - Lib_IntVector_Intrinsics_vec128 v3__2 = v3__1; - Lib_IntVector_Intrinsics_vec128 v4 = v0__2; - Lib_IntVector_Intrinsics_vec128 v5 = v1__2; - Lib_IntVector_Intrinsics_vec128 v6 = v2__2; - Lib_IntVector_Intrinsics_vec128 v7 = v3__2; - Lib_IntVector_Intrinsics_vec128 - v0_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v1_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v2_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st10, st11); - 
Lib_IntVector_Intrinsics_vec128 - v3_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st10, st11); - Lib_IntVector_Intrinsics_vec128 - v0__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v1__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v2__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 - v3__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 v0__4 = v0__3; - Lib_IntVector_Intrinsics_vec128 v2__4 = v2__3; - Lib_IntVector_Intrinsics_vec128 v1__4 = v1__3; - Lib_IntVector_Intrinsics_vec128 v3__4 = v3__3; - Lib_IntVector_Intrinsics_vec128 v8 = v0__4; - Lib_IntVector_Intrinsics_vec128 v9 = v1__4; - Lib_IntVector_Intrinsics_vec128 v10 = v2__4; - Lib_IntVector_Intrinsics_vec128 v11 = v3__4; - Lib_IntVector_Intrinsics_vec128 - v0_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v1_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v2_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v3_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v0__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v1__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v2__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 - v3__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 v0__6 = v0__5; - Lib_IntVector_Intrinsics_vec128 v2__6 = v2__5; - Lib_IntVector_Intrinsics_vec128 v1__6 = v1__5; - Lib_IntVector_Intrinsics_vec128 v3__6 = v3__5; - Lib_IntVector_Intrinsics_vec128 v12 = v0__6; - Lib_IntVector_Intrinsics_vec128 v13 = v1__6; - Lib_IntVector_Intrinsics_vec128 v14 = v2__6; - Lib_IntVector_Intrinsics_vec128 v15 = v3__6; - k[0U] = v0; - k[1U] = v4; - k[2U] = v8; - k[3U] = v12; - k[4U] = v1; - k[5U] = v5; - k[6U] = v9; - k[7U] = v13; - k[8U] = v2; - k[9U] = v6; - k[10U] = v10; - k[11U] = v14; - k[12U] = v3; - k[13U] = v7; - k[14U] = v11; - k[15U] = v15; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 - x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]); - Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } - } -} - -void -Hacl_Chacha20_Vec128_chacha20_decrypt_128( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U }; - uint32_t rem; - uint32_t nb; - uint32_t rem1; - chacha20_init_128(ctx, key, n, ctr); - rem = len % (uint32_t)256U; - nb = len / (uint32_t)256U; - rem1 = len % (uint32_t)256U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *uu____0 = out + i * (uint32_t)256U; - uint8_t *uu____1 = cipher + i * (uint32_t)256U; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; - chacha20_core_128(k, ctx, i); - { - Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; - 
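Hacl_Chacha20_Vec128_chacha20_decrypt_128, which begins just above, is word-for-word the same as chacha20_encrypt_128 except that the input parameter is named cipher: ChaCha20 is a XOR stream cipher, so decryption applies the identical keystream. KaRaMeL duplicates the body rather than sharing it, but behaviourally a one-line wrapper would do (a sketch, not the shipped code):

    /* Decrypting a XOR stream cipher is encrypting the ciphertext with
       the same key, nonce and initial counter. */
    void
    chacha20_decrypt_128_alt(
      uint32_t len, uint8_t *out, uint8_t *cipher,
      uint8_t *key, uint8_t *n, uint32_t ctr
    )
    {
      Hacl_Chacha20_Vec128_chacha20_encrypt_128(len, out, cipher, key, n, ctr);
    }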
Lib_IntVector_Intrinsics_vec128 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec128 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec128 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec128 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec128 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec128 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec128 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec128 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec128 st9 = k[9U]; - Lib_IntVector_Intrinsics_vec128 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec128 st11 = k[11U]; - Lib_IntVector_Intrinsics_vec128 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec128 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec128 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec128 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec128 - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 v0__0 = v0__; - Lib_IntVector_Intrinsics_vec128 v2__0 = v2__; - Lib_IntVector_Intrinsics_vec128 v1__0 = v1__; - Lib_IntVector_Intrinsics_vec128 v3__0 = v3__; - Lib_IntVector_Intrinsics_vec128 v0 = v0__0; - Lib_IntVector_Intrinsics_vec128 v1 = v1__0; - Lib_IntVector_Intrinsics_vec128 v2 = v2__0; - Lib_IntVector_Intrinsics_vec128 v3 = v3__0; - Lib_IntVector_Intrinsics_vec128 - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 v0__2 = v0__1; - Lib_IntVector_Intrinsics_vec128 v2__2 = v2__1; - Lib_IntVector_Intrinsics_vec128 v1__2 = v1__1; - Lib_IntVector_Intrinsics_vec128 v3__2 = v3__1; - Lib_IntVector_Intrinsics_vec128 v4 = v0__2; - Lib_IntVector_Intrinsics_vec128 v5 = v1__2; - Lib_IntVector_Intrinsics_vec128 v6 = v2__2; - Lib_IntVector_Intrinsics_vec128 v7 = v3__2; - Lib_IntVector_Intrinsics_vec128 - v0_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v1_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v2_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st10, st11); - Lib_IntVector_Intrinsics_vec128 - v3_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st10, st11); - 
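The interleave cascade running through this stretch (and repeated once per loop in both functions) is a 4x4 transpose of 32-bit lanes: chacha20_core_128 keeps four ChaCha20 blocks column-wise, one block per lane, so the sixteen post-round state vectors must be transposed into contiguous 64-byte keystream rows before the XOR. On the SSE2 backend of libintvector.h the interleave primitives should map to the unpack intrinsics, giving the classic two-stage transpose (a sketch under that assumption):

    #include <emmintrin.h> /* SSE2 */

    /* 4x4 transpose of 32-bit lanes; assumes interleave_low32/high32
       are _mm_unpacklo/hi_epi32 and interleave_low64/high64 are
       _mm_unpacklo/hi_epi64, per libintvector.h's SSE2 backend. */
    static void transpose4x4(__m128i r[4])
    {
      __m128i t0 = _mm_unpacklo_epi32(r[0], r[1]); /* a0 b0 a1 b1 */
      __m128i t1 = _mm_unpackhi_epi32(r[0], r[1]); /* a2 b2 a3 b3 */
      __m128i t2 = _mm_unpacklo_epi32(r[2], r[3]); /* c0 d0 c1 d1 */
      __m128i t3 = _mm_unpackhi_epi32(r[2], r[3]); /* c2 d2 c3 d3 */
      r[0] = _mm_unpacklo_epi64(t0, t2);           /* a0 b0 c0 d0 */
      r[1] = _mm_unpackhi_epi64(t0, t2);           /* a1 b1 c1 d1 */
      r[2] = _mm_unpacklo_epi64(t1, t3);           /* a2 b2 c2 d2 */
      r[3] = _mm_unpackhi_epi64(t1, t3);           /* a3 b3 c3 d3 */
    }

The deleted code applies this to st0..st3, st4..st7, st8..st11 and st12..st15; the k[0U] = v0, k[1U] = v4, ... shuffle then lays the four rows of each block out consecutively, so the sixteen sequential 16-byte stores emit four complete 64-byte keystream blocks.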
Lib_IntVector_Intrinsics_vec128 - v0__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v1__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v2__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 - v3__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 v0__4 = v0__3; - Lib_IntVector_Intrinsics_vec128 v2__4 = v2__3; - Lib_IntVector_Intrinsics_vec128 v1__4 = v1__3; - Lib_IntVector_Intrinsics_vec128 v3__4 = v3__3; - Lib_IntVector_Intrinsics_vec128 v8 = v0__4; - Lib_IntVector_Intrinsics_vec128 v9 = v1__4; - Lib_IntVector_Intrinsics_vec128 v10 = v2__4; - Lib_IntVector_Intrinsics_vec128 v11 = v3__4; - Lib_IntVector_Intrinsics_vec128 - v0_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v1_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v2_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v3_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v0__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v1__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v2__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 - v3__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 v0__6 = v0__5; - Lib_IntVector_Intrinsics_vec128 v2__6 = v2__5; - Lib_IntVector_Intrinsics_vec128 v1__6 = v1__5; - Lib_IntVector_Intrinsics_vec128 v3__6 = v3__5; - Lib_IntVector_Intrinsics_vec128 v12 = v0__6; - Lib_IntVector_Intrinsics_vec128 v13 = v1__6; - Lib_IntVector_Intrinsics_vec128 v14 = v2__6; - Lib_IntVector_Intrinsics_vec128 v15 = v3__6; - k[0U] = v0; - k[1U] = v4; - k[2U] = v8; - k[3U] = v12; - k[4U] = v1; - k[5U] = v5; - k[6U] = v9; - k[7U] = v13; - k[8U] = v2; - k[9U] = v6; - k[10U] = v10; - k[11U] = v14; - k[12U] = v3; - k[13U] = v7; - k[14U] = v11; - k[15U] = v15; - KRML_MAYBE_FOR16(i0, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 - x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]); - Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y);); - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)256U; - uint8_t *uu____3 = cipher + nb * (uint32_t)256U; - uint8_t plain[256U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; - chacha20_core_128(k, ctx, nb); - { - Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; - Lib_IntVector_Intrinsics_vec128 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec128 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec128 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec128 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec128 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec128 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec128 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec128 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec128 st9 = k[9U]; - Lib_IntVector_Intrinsics_vec128 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec128 st11 = k[11U]; - 
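The KRML_PRE_ALIGN(16)/KRML_POST_ALIGN(16) pair around each stack-allocated vec128 array guarantees 16-byte alignment so aligned SSE loads and register spills are legal. Two macros exist because MSVC and GCC/Clang put the alignment qualifier on opposite sides of a declaration; roughly this, paraphrasing the usual krml/internal/target.h definitions rather than quoting them:

    #if defined(_MSC_VER)
    #define KRML_PRE_ALIGN(X)  __declspec(align(X))
    #define KRML_POST_ALIGN(X)
    #else
    #define KRML_PRE_ALIGN(X)
    #define KRML_POST_ALIGN(X) __attribute__((aligned(X)))
    #endif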
Lib_IntVector_Intrinsics_vec128 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec128 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec128 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec128 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec128 - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st0, st1); - Lib_IntVector_Intrinsics_vec128 - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(st2, st3); - Lib_IntVector_Intrinsics_vec128 - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 v0__0 = v0__; - Lib_IntVector_Intrinsics_vec128 v2__0 = v2__; - Lib_IntVector_Intrinsics_vec128 v1__0 = v1__; - Lib_IntVector_Intrinsics_vec128 v3__0 = v3__; - Lib_IntVector_Intrinsics_vec128 v0 = v0__0; - Lib_IntVector_Intrinsics_vec128 v1 = v1__0; - Lib_IntVector_Intrinsics_vec128 v2 = v2__0; - Lib_IntVector_Intrinsics_vec128 v3 = v3__0; - Lib_IntVector_Intrinsics_vec128 - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st4, st5); - Lib_IntVector_Intrinsics_vec128 - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st6, st7); - Lib_IntVector_Intrinsics_vec128 - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 v0__2 = v0__1; - Lib_IntVector_Intrinsics_vec128 v2__2 = v2__1; - Lib_IntVector_Intrinsics_vec128 v1__2 = v1__1; - Lib_IntVector_Intrinsics_vec128 v3__2 = v3__1; - Lib_IntVector_Intrinsics_vec128 v4 = v0__2; - Lib_IntVector_Intrinsics_vec128 v5 = v1__2; - Lib_IntVector_Intrinsics_vec128 v6 = v2__2; - Lib_IntVector_Intrinsics_vec128 v7 = v3__2; - Lib_IntVector_Intrinsics_vec128 - v0_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v1_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st8, st9); - Lib_IntVector_Intrinsics_vec128 - v2_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st10, st11); - Lib_IntVector_Intrinsics_vec128 - v3_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st10, st11); - Lib_IntVector_Intrinsics_vec128 - v0__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v1__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_1, v2_1); - Lib_IntVector_Intrinsics_vec128 - v2__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 - v3__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_1, v3_1); - Lib_IntVector_Intrinsics_vec128 v0__4 = v0__3; - Lib_IntVector_Intrinsics_vec128 v2__4 = 
v2__3; - Lib_IntVector_Intrinsics_vec128 v1__4 = v1__3; - Lib_IntVector_Intrinsics_vec128 v3__4 = v3__3; - Lib_IntVector_Intrinsics_vec128 v8 = v0__4; - Lib_IntVector_Intrinsics_vec128 v9 = v1__4; - Lib_IntVector_Intrinsics_vec128 v10 = v2__4; - Lib_IntVector_Intrinsics_vec128 v11 = v3__4; - Lib_IntVector_Intrinsics_vec128 - v0_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v1_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st12, st13); - Lib_IntVector_Intrinsics_vec128 - v2_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v3_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(st14, st15); - Lib_IntVector_Intrinsics_vec128 - v0__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v1__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_2, v2_2); - Lib_IntVector_Intrinsics_vec128 - v2__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 - v3__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_2, v3_2); - Lib_IntVector_Intrinsics_vec128 v0__6 = v0__5; - Lib_IntVector_Intrinsics_vec128 v2__6 = v2__5; - Lib_IntVector_Intrinsics_vec128 v1__6 = v1__5; - Lib_IntVector_Intrinsics_vec128 v3__6 = v3__5; - Lib_IntVector_Intrinsics_vec128 v12 = v0__6; - Lib_IntVector_Intrinsics_vec128 v13 = v1__6; - Lib_IntVector_Intrinsics_vec128 v14 = v2__6; - Lib_IntVector_Intrinsics_vec128 v15 = v3__6; - k[0U] = v0; - k[1U] = v4; - k[2U] = v8; - k[3U] = v12; - k[4U] = v1; - k[5U] = v5; - k[6U] = v9; - k[7U] = v13; - k[8U] = v2; - k[9U] = v6; - k[10U] = v10; - k[11U] = v14; - k[12U] = v3; - k[13U] = v7; - k[14U] = v11; - k[15U] = v15; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 - x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]); - Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_Chacha20_Vec128.h b/dist/c89-compatible/Hacl_Chacha20_Vec128.h deleted file mode 100644 index 26a879d936..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20_Vec128.h +++ /dev/null @@ -1,65 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Chacha20_Vec128_H -#define __Hacl_Chacha20_Vec128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void -Hacl_Chacha20_Vec128_chacha20_encrypt_128( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -); - -void -Hacl_Chacha20_Vec128_chacha20_decrypt_128( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Chacha20_Vec128_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Chacha20_Vec256.c b/dist/c89-compatible/Hacl_Chacha20_Vec256.c deleted file mode 100644 index ee3f41a46c..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20_Vec256.c +++ /dev/null @@ -1,1268 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "Hacl_Chacha20_Vec256.h" - -#include "internal/Hacl_Chacha20.h" -#include "libintvector.h" -static inline void double_round_256(Lib_IntVector_Intrinsics_vec256 *st) -{ - Lib_IntVector_Intrinsics_vec256 std0; - Lib_IntVector_Intrinsics_vec256 std1; - Lib_IntVector_Intrinsics_vec256 std2; - Lib_IntVector_Intrinsics_vec256 std3; - Lib_IntVector_Intrinsics_vec256 std4; - Lib_IntVector_Intrinsics_vec256 std5; - Lib_IntVector_Intrinsics_vec256 std6; - Lib_IntVector_Intrinsics_vec256 std7; - Lib_IntVector_Intrinsics_vec256 std8; - Lib_IntVector_Intrinsics_vec256 std9; - Lib_IntVector_Intrinsics_vec256 std10; - Lib_IntVector_Intrinsics_vec256 std11; - Lib_IntVector_Intrinsics_vec256 std12; - Lib_IntVector_Intrinsics_vec256 std13; - Lib_IntVector_Intrinsics_vec256 std14; - Lib_IntVector_Intrinsics_vec256 std15; - Lib_IntVector_Intrinsics_vec256 std16; - Lib_IntVector_Intrinsics_vec256 std17; - Lib_IntVector_Intrinsics_vec256 std18; - Lib_IntVector_Intrinsics_vec256 std19; - Lib_IntVector_Intrinsics_vec256 std20; - Lib_IntVector_Intrinsics_vec256 std21; - Lib_IntVector_Intrinsics_vec256 std22; - Lib_IntVector_Intrinsics_vec256 std23; - Lib_IntVector_Intrinsics_vec256 std24; - Lib_IntVector_Intrinsics_vec256 std25; - Lib_IntVector_Intrinsics_vec256 std26; - Lib_IntVector_Intrinsics_vec256 std27; - Lib_IntVector_Intrinsics_vec256 std28; - Lib_IntVector_Intrinsics_vec256 std29; - Lib_IntVector_Intrinsics_vec256 std30; - Lib_IntVector_Intrinsics_vec256 std; - st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]); - std0 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]); - st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, (uint32_t)16U); - st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]); - std1 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]); - st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, (uint32_t)12U); - st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]); - std2 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]); - st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, (uint32_t)8U); - st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]); - std3 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]); - st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, (uint32_t)7U); - st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]); - std4 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]); - st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, (uint32_t)16U); - st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]); - std5 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]); - st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, (uint32_t)12U); - st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]); - std6 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]); - st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, (uint32_t)8U); - st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]); - std7 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]); - st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, (uint32_t)7U); - st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]); - std8 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]); - st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, (uint32_t)16U); - st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]); - std9 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]); - st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, 
(uint32_t)12U); - st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]); - std10 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]); - st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, (uint32_t)8U); - st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]); - std11 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]); - st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, (uint32_t)7U); - st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]); - std12 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]); - st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, (uint32_t)16U); - st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]); - std13 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]); - st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, (uint32_t)12U); - st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]); - std14 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]); - st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, (uint32_t)8U); - st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]); - std15 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]); - st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, (uint32_t)7U); - st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]); - std16 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]); - st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, (uint32_t)16U); - st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]); - std17 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]); - st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, (uint32_t)12U); - st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]); - std18 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]); - st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, (uint32_t)8U); - st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]); - std19 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]); - st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, (uint32_t)7U); - st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]); - std20 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]); - st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, (uint32_t)16U); - st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]); - std21 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]); - st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, (uint32_t)12U); - st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]); - std22 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]); - st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, (uint32_t)8U); - st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]); - std23 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]); - st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, (uint32_t)7U); - st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]); - std24 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]); - st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, (uint32_t)16U); - st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]); - std25 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]); - st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, (uint32_t)12U); - st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]); - std26 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]); - st[13U] = 
Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, (uint32_t)8U); - st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]); - std27 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]); - st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, (uint32_t)7U); - st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]); - std28 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]); - st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, (uint32_t)16U); - st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]); - std29 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]); - st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, (uint32_t)12U); - st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]); - std30 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]); - st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, (uint32_t)8U); - st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]); - std = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]); - st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, (uint32_t)7U); -} - -static inline void -chacha20_core_256( - Lib_IntVector_Intrinsics_vec256 *k, - Lib_IntVector_Intrinsics_vec256 *ctx, - uint32_t ctr -) -{ - uint32_t ctr_u32; - Lib_IntVector_Intrinsics_vec256 cv; - memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec256)); - ctr_u32 = (uint32_t)8U * ctr; - cv = Lib_IntVector_Intrinsics_vec256_load32(ctr_u32); - k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv); - double_round_256(k); - double_round_256(k); - double_round_256(k); - double_round_256(k); - double_round_256(k); - double_round_256(k); - double_round_256(k); - double_round_256(k); - double_round_256(k); - double_round_256(k); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = k; - Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(k[i], ctx[i]); - os[i] = x;); - k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv); -} - -static inline void -chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n, uint32_t ctr) -{ - uint32_t ctx1[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 ctr1; - Lib_IntVector_Intrinsics_vec256 c12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = ctx1; - uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i]; - os[i] = x;); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = ctx1 + (uint32_t)4U; - uint8_t *bj = k + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - ctx1[12U] = ctr; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint32_t *os = ctx1 + (uint32_t)13U; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = ctx; - uint32_t x = ctx1[i]; - Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_load32(x); - os[i] = x0;); - ctr1 = - Lib_IntVector_Intrinsics_vec256_load32s((uint32_t)0U, - (uint32_t)1U, - (uint32_t)2U, - (uint32_t)3U, - (uint32_t)4U, - (uint32_t)5U, - (uint32_t)6U, - (uint32_t)7U); - c12 = ctx[12U]; - ctx[12U] = Lib_IntVector_Intrinsics_vec256_add32(c12, ctr1); -} - -void -Hacl_Chacha20_Vec256_chacha20_encrypt_256( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t 
*n, - uint32_t ctr -) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rem; - uint32_t nb; - uint32_t rem1; - chacha20_init_256(ctx, key, n, ctr); - rem = len % (uint32_t)512U; - nb = len / (uint32_t)512U; - rem1 = len % (uint32_t)512U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *uu____0 = out + i * (uint32_t)512U; - uint8_t *uu____1 = text + i * (uint32_t)512U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; - chacha20_core_256(k, ctx, i); - { - Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; - Lib_IntVector_Intrinsics_vec256 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec256 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec256 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec256 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec256 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec256 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec256 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec256 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec256 st9 = k[9U]; - Lib_IntVector_Intrinsics_vec256 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec256 st11 = k[11U]; - Lib_IntVector_Intrinsics_vec256 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec256 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec256 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec256 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec256 v00 = st0; - Lib_IntVector_Intrinsics_vec256 v16 = st1; - Lib_IntVector_Intrinsics_vec256 v20 = st2; - Lib_IntVector_Intrinsics_vec256 v30 = st3; - Lib_IntVector_Intrinsics_vec256 v40 = st4; - Lib_IntVector_Intrinsics_vec256 v50 = st5; - Lib_IntVector_Intrinsics_vec256 v60 = st6; - Lib_IntVector_Intrinsics_vec256 v70 = st7; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70); - Lib_IntVector_Intrinsics_vec256 - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70); - Lib_IntVector_Intrinsics_vec256 v0_0 = v0_; - Lib_IntVector_Intrinsics_vec256 v1_0 = v1_; - Lib_IntVector_Intrinsics_vec256 v2_0 = v2_; - Lib_IntVector_Intrinsics_vec256 v3_0 = v3_; - Lib_IntVector_Intrinsics_vec256 v4_0 = v4_; - Lib_IntVector_Intrinsics_vec256 v5_0 = v5_; - Lib_IntVector_Intrinsics_vec256 v6_0 = v6_; - Lib_IntVector_Intrinsics_vec256 v7_0 = v7_; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v4_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 - v6_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 
- v5_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 - v7_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 v0_10 = v0_1; - Lib_IntVector_Intrinsics_vec256 v1_10 = v1_1; - Lib_IntVector_Intrinsics_vec256 v2_10 = v2_1; - Lib_IntVector_Intrinsics_vec256 v3_10 = v3_1; - Lib_IntVector_Intrinsics_vec256 v4_10 = v4_1; - Lib_IntVector_Intrinsics_vec256 v5_10 = v5_1; - Lib_IntVector_Intrinsics_vec256 v6_10 = v6_1; - Lib_IntVector_Intrinsics_vec256 v7_10 = v7_1; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v4_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v5_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v6_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 - v7_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 v0_20 = v0_2; - Lib_IntVector_Intrinsics_vec256 v1_20 = v1_2; - Lib_IntVector_Intrinsics_vec256 v2_20 = v2_2; - Lib_IntVector_Intrinsics_vec256 v3_20 = v3_2; - Lib_IntVector_Intrinsics_vec256 v4_20 = v4_2; - Lib_IntVector_Intrinsics_vec256 v5_20 = v5_2; - Lib_IntVector_Intrinsics_vec256 v6_20 = v6_2; - Lib_IntVector_Intrinsics_vec256 v7_20 = v7_2; - Lib_IntVector_Intrinsics_vec256 v0_3 = v0_20; - Lib_IntVector_Intrinsics_vec256 v1_3 = v1_20; - Lib_IntVector_Intrinsics_vec256 v2_3 = v2_20; - Lib_IntVector_Intrinsics_vec256 v3_3 = v3_20; - Lib_IntVector_Intrinsics_vec256 v4_3 = v4_20; - Lib_IntVector_Intrinsics_vec256 v5_3 = v5_20; - Lib_IntVector_Intrinsics_vec256 v6_3 = v6_20; - Lib_IntVector_Intrinsics_vec256 v7_3 = v7_20; - Lib_IntVector_Intrinsics_vec256 v0 = v0_3; - Lib_IntVector_Intrinsics_vec256 v1 = v2_3; - Lib_IntVector_Intrinsics_vec256 v2 = v1_3; - Lib_IntVector_Intrinsics_vec256 v3 = v3_3; - Lib_IntVector_Intrinsics_vec256 v4 = v4_3; - Lib_IntVector_Intrinsics_vec256 v5 = v6_3; - Lib_IntVector_Intrinsics_vec256 v6 = v5_3; - Lib_IntVector_Intrinsics_vec256 v7 = v7_3; - Lib_IntVector_Intrinsics_vec256 v01 = st8; - Lib_IntVector_Intrinsics_vec256 v110 = st9; - Lib_IntVector_Intrinsics_vec256 v21 = st10; - Lib_IntVector_Intrinsics_vec256 v31 = st11; - Lib_IntVector_Intrinsics_vec256 v41 = st12; - Lib_IntVector_Intrinsics_vec256 v51 = st13; - Lib_IntVector_Intrinsics_vec256 v61 = st14; - Lib_IntVector_Intrinsics_vec256 v71 = st15; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v4_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51); - Lib_IntVector_Intrinsics_vec256 - v5_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51); - 
Lib_IntVector_Intrinsics_vec256 - v6_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71); - Lib_IntVector_Intrinsics_vec256 - v7_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71); - Lib_IntVector_Intrinsics_vec256 v0_5 = v0_4; - Lib_IntVector_Intrinsics_vec256 v1_5 = v1_4; - Lib_IntVector_Intrinsics_vec256 v2_5 = v2_4; - Lib_IntVector_Intrinsics_vec256 v3_5 = v3_4; - Lib_IntVector_Intrinsics_vec256 v4_5 = v4_4; - Lib_IntVector_Intrinsics_vec256 v5_5 = v5_4; - Lib_IntVector_Intrinsics_vec256 v6_5 = v6_4; - Lib_IntVector_Intrinsics_vec256 v7_5 = v7_4; - Lib_IntVector_Intrinsics_vec256 - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v4_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v6_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v5_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 - v7_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 v0_12 = v0_11; - Lib_IntVector_Intrinsics_vec256 v1_12 = v1_11; - Lib_IntVector_Intrinsics_vec256 v2_12 = v2_11; - Lib_IntVector_Intrinsics_vec256 v3_12 = v3_11; - Lib_IntVector_Intrinsics_vec256 v4_12 = v4_11; - Lib_IntVector_Intrinsics_vec256 v5_12 = v5_11; - Lib_IntVector_Intrinsics_vec256 v6_12 = v6_11; - Lib_IntVector_Intrinsics_vec256 v7_12 = v7_11; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v4_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v5_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v6_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 - v7_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 v0_22 = v0_21; - Lib_IntVector_Intrinsics_vec256 v1_22 = v1_21; - Lib_IntVector_Intrinsics_vec256 v2_22 = v2_21; - Lib_IntVector_Intrinsics_vec256 v3_22 = v3_21; - Lib_IntVector_Intrinsics_vec256 v4_22 = v4_21; - Lib_IntVector_Intrinsics_vec256 v5_22 = v5_21; - Lib_IntVector_Intrinsics_vec256 v6_22 = v6_21; - Lib_IntVector_Intrinsics_vec256 v7_22 = v7_21; - Lib_IntVector_Intrinsics_vec256 v0_6 = v0_22; - Lib_IntVector_Intrinsics_vec256 v1_6 = v1_22; - Lib_IntVector_Intrinsics_vec256 v2_6 = v2_22; - Lib_IntVector_Intrinsics_vec256 v3_6 = v3_22; - Lib_IntVector_Intrinsics_vec256 v4_6 = v4_22; - Lib_IntVector_Intrinsics_vec256 v5_6 = v5_22; - Lib_IntVector_Intrinsics_vec256 v6_6 = v6_22; - Lib_IntVector_Intrinsics_vec256 v7_6 = v7_22; - Lib_IntVector_Intrinsics_vec256 v8 = v0_6; - 
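/* End of the transpose. The renamings just above (v1 = v2_3, v2 = v1_3,
   v5 = v6_3, v6 = v5_3, and likewise v9/v10 and v13/v14 below) undo the
   lane ordering left by the interleave network. The write-back that follows
   alternates the two halves (v0, v8, v1, v9, ...) so that k[j] holds bytes
   32*j .. 32*j+31 of the 512-byte keystream block, ready for the linear
   load/xor/store pass. */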
Lib_IntVector_Intrinsics_vec256 v9 = v2_6; - Lib_IntVector_Intrinsics_vec256 v10 = v1_6; - Lib_IntVector_Intrinsics_vec256 v11 = v3_6; - Lib_IntVector_Intrinsics_vec256 v12 = v4_6; - Lib_IntVector_Intrinsics_vec256 v13 = v6_6; - Lib_IntVector_Intrinsics_vec256 v14 = v5_6; - Lib_IntVector_Intrinsics_vec256 v15 = v7_6; - k[0U] = v0; - k[1U] = v8; - k[2U] = v1; - k[3U] = v9; - k[4U] = v2; - k[5U] = v10; - k[6U] = v3; - k[7U] = v11; - k[8U] = v4; - k[9U] = v12; - k[10U] = v5; - k[11U] = v13; - k[12U] = v6; - k[13U] = v14; - k[14U] = v7; - k[15U] = v15; - KRML_MAYBE_FOR16(i0, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]); - Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y);); - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)512U; - uint8_t *uu____3 = text + nb * (uint32_t)512U; - uint8_t plain[512U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; - chacha20_core_256(k, ctx, nb); - { - Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; - Lib_IntVector_Intrinsics_vec256 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec256 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec256 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec256 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec256 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec256 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec256 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec256 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec256 st9 = k[9U]; - Lib_IntVector_Intrinsics_vec256 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec256 st11 = k[11U]; - Lib_IntVector_Intrinsics_vec256 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec256 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec256 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec256 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec256 v00 = st0; - Lib_IntVector_Intrinsics_vec256 v16 = st1; - Lib_IntVector_Intrinsics_vec256 v20 = st2; - Lib_IntVector_Intrinsics_vec256 v30 = st3; - Lib_IntVector_Intrinsics_vec256 v40 = st4; - Lib_IntVector_Intrinsics_vec256 v50 = st5; - Lib_IntVector_Intrinsics_vec256 v60 = st6; - Lib_IntVector_Intrinsics_vec256 v70 = st7; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70); - Lib_IntVector_Intrinsics_vec256 - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70); - Lib_IntVector_Intrinsics_vec256 v0_0 = v0_; - Lib_IntVector_Intrinsics_vec256 v1_0 = v1_; - Lib_IntVector_Intrinsics_vec256 v2_0 = v2_; - Lib_IntVector_Intrinsics_vec256 v3_0 = v3_; - Lib_IntVector_Intrinsics_vec256 v4_0 = v4_; - Lib_IntVector_Intrinsics_vec256 v5_0 = v5_; - Lib_IntVector_Intrinsics_vec256 v6_0 = v6_; - 
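/* Tail path, entered above when rem1 > 0: the final len % 512 bytes were
   copied into the zero-padded 512-byte `plain` buffer, and the identical
   core/transpose/XOR sequence runs on it; only `rem` bytes are copied back
   out at the end. Note `rem` and `rem1` both hold len % 512U -- the
   extracted code simply computes the value twice. */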
Lib_IntVector_Intrinsics_vec256 v7_0 = v7_; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v4_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 - v6_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 - v5_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 - v7_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 v0_10 = v0_1; - Lib_IntVector_Intrinsics_vec256 v1_10 = v1_1; - Lib_IntVector_Intrinsics_vec256 v2_10 = v2_1; - Lib_IntVector_Intrinsics_vec256 v3_10 = v3_1; - Lib_IntVector_Intrinsics_vec256 v4_10 = v4_1; - Lib_IntVector_Intrinsics_vec256 v5_10 = v5_1; - Lib_IntVector_Intrinsics_vec256 v6_10 = v6_1; - Lib_IntVector_Intrinsics_vec256 v7_10 = v7_1; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v4_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v5_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v6_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 - v7_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 v0_20 = v0_2; - Lib_IntVector_Intrinsics_vec256 v1_20 = v1_2; - Lib_IntVector_Intrinsics_vec256 v2_20 = v2_2; - Lib_IntVector_Intrinsics_vec256 v3_20 = v3_2; - Lib_IntVector_Intrinsics_vec256 v4_20 = v4_2; - Lib_IntVector_Intrinsics_vec256 v5_20 = v5_2; - Lib_IntVector_Intrinsics_vec256 v6_20 = v6_2; - Lib_IntVector_Intrinsics_vec256 v7_20 = v7_2; - Lib_IntVector_Intrinsics_vec256 v0_3 = v0_20; - Lib_IntVector_Intrinsics_vec256 v1_3 = v1_20; - Lib_IntVector_Intrinsics_vec256 v2_3 = v2_20; - Lib_IntVector_Intrinsics_vec256 v3_3 = v3_20; - Lib_IntVector_Intrinsics_vec256 v4_3 = v4_20; - Lib_IntVector_Intrinsics_vec256 v5_3 = v5_20; - Lib_IntVector_Intrinsics_vec256 v6_3 = v6_20; - Lib_IntVector_Intrinsics_vec256 v7_3 = v7_20; - Lib_IntVector_Intrinsics_vec256 v0 = v0_3; - Lib_IntVector_Intrinsics_vec256 v1 = v2_3; - Lib_IntVector_Intrinsics_vec256 v2 = v1_3; - Lib_IntVector_Intrinsics_vec256 v3 = v3_3; - Lib_IntVector_Intrinsics_vec256 v4 = v4_3; - Lib_IntVector_Intrinsics_vec256 v5 = v6_3; - Lib_IntVector_Intrinsics_vec256 v6 = v5_3; - Lib_IntVector_Intrinsics_vec256 v7 = v7_3; - Lib_IntVector_Intrinsics_vec256 v01 = st8; - Lib_IntVector_Intrinsics_vec256 v110 = st9; - Lib_IntVector_Intrinsics_vec256 v21 = st10; - Lib_IntVector_Intrinsics_vec256 v31 = st11; - Lib_IntVector_Intrinsics_vec256 v41 = st12; - Lib_IntVector_Intrinsics_vec256 v51 = st13; - 
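/* The transpose is not factored into a helper: the extraction pipeline
   inlines it at every use site, so the same straight-line sequence appears
   four times in this file (main loop and tail, for both encrypt and
   decrypt). */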
Lib_IntVector_Intrinsics_vec256 v61 = st14; - Lib_IntVector_Intrinsics_vec256 v71 = st15; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v4_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51); - Lib_IntVector_Intrinsics_vec256 - v5_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51); - Lib_IntVector_Intrinsics_vec256 - v6_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71); - Lib_IntVector_Intrinsics_vec256 - v7_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71); - Lib_IntVector_Intrinsics_vec256 v0_5 = v0_4; - Lib_IntVector_Intrinsics_vec256 v1_5 = v1_4; - Lib_IntVector_Intrinsics_vec256 v2_5 = v2_4; - Lib_IntVector_Intrinsics_vec256 v3_5 = v3_4; - Lib_IntVector_Intrinsics_vec256 v4_5 = v4_4; - Lib_IntVector_Intrinsics_vec256 v5_5 = v5_4; - Lib_IntVector_Intrinsics_vec256 v6_5 = v6_4; - Lib_IntVector_Intrinsics_vec256 v7_5 = v7_4; - Lib_IntVector_Intrinsics_vec256 - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v4_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v6_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v5_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 - v7_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 v0_12 = v0_11; - Lib_IntVector_Intrinsics_vec256 v1_12 = v1_11; - Lib_IntVector_Intrinsics_vec256 v2_12 = v2_11; - Lib_IntVector_Intrinsics_vec256 v3_12 = v3_11; - Lib_IntVector_Intrinsics_vec256 v4_12 = v4_11; - Lib_IntVector_Intrinsics_vec256 v5_12 = v5_11; - Lib_IntVector_Intrinsics_vec256 v6_12 = v6_11; - Lib_IntVector_Intrinsics_vec256 v7_12 = v7_11; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v4_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v5_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v6_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 - v7_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 v0_22 = v0_21; - Lib_IntVector_Intrinsics_vec256 v1_22 = v1_21; - Lib_IntVector_Intrinsics_vec256 v2_22 
= v2_21; - Lib_IntVector_Intrinsics_vec256 v3_22 = v3_21; - Lib_IntVector_Intrinsics_vec256 v4_22 = v4_21; - Lib_IntVector_Intrinsics_vec256 v5_22 = v5_21; - Lib_IntVector_Intrinsics_vec256 v6_22 = v6_21; - Lib_IntVector_Intrinsics_vec256 v7_22 = v7_21; - Lib_IntVector_Intrinsics_vec256 v0_6 = v0_22; - Lib_IntVector_Intrinsics_vec256 v1_6 = v1_22; - Lib_IntVector_Intrinsics_vec256 v2_6 = v2_22; - Lib_IntVector_Intrinsics_vec256 v3_6 = v3_22; - Lib_IntVector_Intrinsics_vec256 v4_6 = v4_22; - Lib_IntVector_Intrinsics_vec256 v5_6 = v5_22; - Lib_IntVector_Intrinsics_vec256 v6_6 = v6_22; - Lib_IntVector_Intrinsics_vec256 v7_6 = v7_22; - Lib_IntVector_Intrinsics_vec256 v8 = v0_6; - Lib_IntVector_Intrinsics_vec256 v9 = v2_6; - Lib_IntVector_Intrinsics_vec256 v10 = v1_6; - Lib_IntVector_Intrinsics_vec256 v11 = v3_6; - Lib_IntVector_Intrinsics_vec256 v12 = v4_6; - Lib_IntVector_Intrinsics_vec256 v13 = v6_6; - Lib_IntVector_Intrinsics_vec256 v14 = v5_6; - Lib_IntVector_Intrinsics_vec256 v15 = v7_6; - k[0U] = v0; - k[1U] = v8; - k[2U] = v1; - k[3U] = v9; - k[4U] = v2; - k[5U] = v10; - k[6U] = v3; - k[7U] = v11; - k[8U] = v4; - k[9U] = v12; - k[10U] = v5; - k[11U] = v13; - k[12U] = v6; - k[13U] = v14; - k[14U] = v7; - k[15U] = v15; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]); - Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } - } -} - -void -Hacl_Chacha20_Vec256_chacha20_decrypt_256( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rem; - uint32_t nb; - uint32_t rem1; - chacha20_init_256(ctx, key, n, ctr); - rem = len % (uint32_t)512U; - nb = len / (uint32_t)512U; - rem1 = len % (uint32_t)512U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *uu____0 = out + i * (uint32_t)512U; - uint8_t *uu____1 = cipher + i * (uint32_t)512U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; - chacha20_core_256(k, ctx, i); - { - Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; - Lib_IntVector_Intrinsics_vec256 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec256 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec256 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec256 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec256 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec256 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec256 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec256 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec256 st9 = k[9U]; - Lib_IntVector_Intrinsics_vec256 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec256 st11 = k[11U]; - Lib_IntVector_Intrinsics_vec256 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec256 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec256 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec256 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec256 v00 = st0; - Lib_IntVector_Intrinsics_vec256 v16 = st1; - Lib_IntVector_Intrinsics_vec256 v20 = st2; - Lib_IntVector_Intrinsics_vec256 v30 = st3; - Lib_IntVector_Intrinsics_vec256 v40 = st4; - Lib_IntVector_Intrinsics_vec256 v50 = st5; - Lib_IntVector_Intrinsics_vec256 v60 = st6; - Lib_IntVector_Intrinsics_vec256 v70 = st7; - Lib_IntVector_Intrinsics_vec256 - v0_ = 
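/* chacha20_decrypt_256 is structurally identical to chacha20_encrypt_256:
   ChaCha20 is a stream cipher, so decryption XORs the ciphertext with the
   same keystream. Only the name of the input buffer (`cipher` instead of
   `text`) differs. */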
Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70); - Lib_IntVector_Intrinsics_vec256 - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70); - Lib_IntVector_Intrinsics_vec256 v0_0 = v0_; - Lib_IntVector_Intrinsics_vec256 v1_0 = v1_; - Lib_IntVector_Intrinsics_vec256 v2_0 = v2_; - Lib_IntVector_Intrinsics_vec256 v3_0 = v3_; - Lib_IntVector_Intrinsics_vec256 v4_0 = v4_; - Lib_IntVector_Intrinsics_vec256 v5_0 = v5_; - Lib_IntVector_Intrinsics_vec256 v6_0 = v6_; - Lib_IntVector_Intrinsics_vec256 v7_0 = v7_; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v4_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 - v6_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 - v5_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 - v7_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 v0_10 = v0_1; - Lib_IntVector_Intrinsics_vec256 v1_10 = v1_1; - Lib_IntVector_Intrinsics_vec256 v2_10 = v2_1; - Lib_IntVector_Intrinsics_vec256 v3_10 = v3_1; - Lib_IntVector_Intrinsics_vec256 v4_10 = v4_1; - Lib_IntVector_Intrinsics_vec256 v5_10 = v5_1; - Lib_IntVector_Intrinsics_vec256 v6_10 = v6_1; - Lib_IntVector_Intrinsics_vec256 v7_10 = v7_1; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v4_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v5_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v6_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 - v7_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 v0_20 = v0_2; - Lib_IntVector_Intrinsics_vec256 v1_20 = v1_2; - Lib_IntVector_Intrinsics_vec256 v2_20 = v2_2; - Lib_IntVector_Intrinsics_vec256 v3_20 = v3_2; - Lib_IntVector_Intrinsics_vec256 v4_20 = v4_2; - Lib_IntVector_Intrinsics_vec256 v5_20 = v5_2; - 
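/* The long chains of single-assignment copies (v0_ -> v0_0 -> v0_1 -> ...)
   appear to mirror let-bindings in the verified Low* source, which KaRaMeL
   emits one-for-one; any optimizing C compiler coalesces them, so they
   carry no runtime cost. */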
Lib_IntVector_Intrinsics_vec256 v6_20 = v6_2; - Lib_IntVector_Intrinsics_vec256 v7_20 = v7_2; - Lib_IntVector_Intrinsics_vec256 v0_3 = v0_20; - Lib_IntVector_Intrinsics_vec256 v1_3 = v1_20; - Lib_IntVector_Intrinsics_vec256 v2_3 = v2_20; - Lib_IntVector_Intrinsics_vec256 v3_3 = v3_20; - Lib_IntVector_Intrinsics_vec256 v4_3 = v4_20; - Lib_IntVector_Intrinsics_vec256 v5_3 = v5_20; - Lib_IntVector_Intrinsics_vec256 v6_3 = v6_20; - Lib_IntVector_Intrinsics_vec256 v7_3 = v7_20; - Lib_IntVector_Intrinsics_vec256 v0 = v0_3; - Lib_IntVector_Intrinsics_vec256 v1 = v2_3; - Lib_IntVector_Intrinsics_vec256 v2 = v1_3; - Lib_IntVector_Intrinsics_vec256 v3 = v3_3; - Lib_IntVector_Intrinsics_vec256 v4 = v4_3; - Lib_IntVector_Intrinsics_vec256 v5 = v6_3; - Lib_IntVector_Intrinsics_vec256 v6 = v5_3; - Lib_IntVector_Intrinsics_vec256 v7 = v7_3; - Lib_IntVector_Intrinsics_vec256 v01 = st8; - Lib_IntVector_Intrinsics_vec256 v110 = st9; - Lib_IntVector_Intrinsics_vec256 v21 = st10; - Lib_IntVector_Intrinsics_vec256 v31 = st11; - Lib_IntVector_Intrinsics_vec256 v41 = st12; - Lib_IntVector_Intrinsics_vec256 v51 = st13; - Lib_IntVector_Intrinsics_vec256 v61 = st14; - Lib_IntVector_Intrinsics_vec256 v71 = st15; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v4_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51); - Lib_IntVector_Intrinsics_vec256 - v5_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51); - Lib_IntVector_Intrinsics_vec256 - v6_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71); - Lib_IntVector_Intrinsics_vec256 - v7_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71); - Lib_IntVector_Intrinsics_vec256 v0_5 = v0_4; - Lib_IntVector_Intrinsics_vec256 v1_5 = v1_4; - Lib_IntVector_Intrinsics_vec256 v2_5 = v2_4; - Lib_IntVector_Intrinsics_vec256 v3_5 = v3_4; - Lib_IntVector_Intrinsics_vec256 v4_5 = v4_4; - Lib_IntVector_Intrinsics_vec256 v5_5 = v5_4; - Lib_IntVector_Intrinsics_vec256 v6_5 = v6_4; - Lib_IntVector_Intrinsics_vec256 v7_5 = v7_4; - Lib_IntVector_Intrinsics_vec256 - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v4_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v6_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v5_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 - v7_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 v0_12 = v0_11; - Lib_IntVector_Intrinsics_vec256 v1_12 = v1_11; - Lib_IntVector_Intrinsics_vec256 v2_12 = v2_11; - Lib_IntVector_Intrinsics_vec256 v3_12 = v3_11; - Lib_IntVector_Intrinsics_vec256 v4_12 = v4_11; - Lib_IntVector_Intrinsics_vec256 v5_12 = 
v5_11; - Lib_IntVector_Intrinsics_vec256 v6_12 = v6_11; - Lib_IntVector_Intrinsics_vec256 v7_12 = v7_11; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v4_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v5_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v6_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 - v7_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 v0_22 = v0_21; - Lib_IntVector_Intrinsics_vec256 v1_22 = v1_21; - Lib_IntVector_Intrinsics_vec256 v2_22 = v2_21; - Lib_IntVector_Intrinsics_vec256 v3_22 = v3_21; - Lib_IntVector_Intrinsics_vec256 v4_22 = v4_21; - Lib_IntVector_Intrinsics_vec256 v5_22 = v5_21; - Lib_IntVector_Intrinsics_vec256 v6_22 = v6_21; - Lib_IntVector_Intrinsics_vec256 v7_22 = v7_21; - Lib_IntVector_Intrinsics_vec256 v0_6 = v0_22; - Lib_IntVector_Intrinsics_vec256 v1_6 = v1_22; - Lib_IntVector_Intrinsics_vec256 v2_6 = v2_22; - Lib_IntVector_Intrinsics_vec256 v3_6 = v3_22; - Lib_IntVector_Intrinsics_vec256 v4_6 = v4_22; - Lib_IntVector_Intrinsics_vec256 v5_6 = v5_22; - Lib_IntVector_Intrinsics_vec256 v6_6 = v6_22; - Lib_IntVector_Intrinsics_vec256 v7_6 = v7_22; - Lib_IntVector_Intrinsics_vec256 v8 = v0_6; - Lib_IntVector_Intrinsics_vec256 v9 = v2_6; - Lib_IntVector_Intrinsics_vec256 v10 = v1_6; - Lib_IntVector_Intrinsics_vec256 v11 = v3_6; - Lib_IntVector_Intrinsics_vec256 v12 = v4_6; - Lib_IntVector_Intrinsics_vec256 v13 = v6_6; - Lib_IntVector_Intrinsics_vec256 v14 = v5_6; - Lib_IntVector_Intrinsics_vec256 v15 = v7_6; - k[0U] = v0; - k[1U] = v8; - k[2U] = v1; - k[3U] = v9; - k[4U] = v2; - k[5U] = v10; - k[6U] = v3; - k[7U] = v11; - k[8U] = v4; - k[9U] = v12; - k[10U] = v5; - k[11U] = v13; - k[12U] = v6; - k[13U] = v14; - k[14U] = v7; - k[15U] = v15; - KRML_MAYBE_FOR16(i0, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]); - Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y);); - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)512U; - uint8_t *uu____3 = cipher + nb * (uint32_t)512U; - uint8_t plain[512U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; - chacha20_core_256(k, ctx, nb); - { - Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; - Lib_IntVector_Intrinsics_vec256 st1 = k[1U]; - Lib_IntVector_Intrinsics_vec256 st2 = k[2U]; - Lib_IntVector_Intrinsics_vec256 st3 = k[3U]; - Lib_IntVector_Intrinsics_vec256 st4 = k[4U]; - Lib_IntVector_Intrinsics_vec256 st5 = k[5U]; - Lib_IntVector_Intrinsics_vec256 st6 = k[6U]; - Lib_IntVector_Intrinsics_vec256 st7 = k[7U]; - Lib_IntVector_Intrinsics_vec256 st8 = k[8U]; - Lib_IntVector_Intrinsics_vec256 st9 = k[9U]; - 
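/* Decrypt's tail handling mirrors encrypt's: pad the last partial block
   into `plain`, XOR it with the keystream in place, then copy the `rem`
   live bytes to `out`. */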
Lib_IntVector_Intrinsics_vec256 st10 = k[10U]; - Lib_IntVector_Intrinsics_vec256 st11 = k[11U]; - Lib_IntVector_Intrinsics_vec256 st12 = k[12U]; - Lib_IntVector_Intrinsics_vec256 st13 = k[13U]; - Lib_IntVector_Intrinsics_vec256 st14 = k[14U]; - Lib_IntVector_Intrinsics_vec256 st15 = k[15U]; - Lib_IntVector_Intrinsics_vec256 v00 = st0; - Lib_IntVector_Intrinsics_vec256 v16 = st1; - Lib_IntVector_Intrinsics_vec256 v20 = st2; - Lib_IntVector_Intrinsics_vec256 v30 = st3; - Lib_IntVector_Intrinsics_vec256 v40 = st4; - Lib_IntVector_Intrinsics_vec256 v50 = st5; - Lib_IntVector_Intrinsics_vec256 v60 = st6; - Lib_IntVector_Intrinsics_vec256 v70 = st7; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v16); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50); - Lib_IntVector_Intrinsics_vec256 - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70); - Lib_IntVector_Intrinsics_vec256 - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70); - Lib_IntVector_Intrinsics_vec256 v0_0 = v0_; - Lib_IntVector_Intrinsics_vec256 v1_0 = v1_; - Lib_IntVector_Intrinsics_vec256 v2_0 = v2_; - Lib_IntVector_Intrinsics_vec256 v3_0 = v3_; - Lib_IntVector_Intrinsics_vec256 v4_0 = v4_; - Lib_IntVector_Intrinsics_vec256 v5_0 = v5_; - Lib_IntVector_Intrinsics_vec256 v6_0 = v6_; - Lib_IntVector_Intrinsics_vec256 v7_0 = v7_; - Lib_IntVector_Intrinsics_vec256 - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v4_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 - v6_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0); - Lib_IntVector_Intrinsics_vec256 - v5_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 - v7_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0); - Lib_IntVector_Intrinsics_vec256 v0_10 = v0_1; - Lib_IntVector_Intrinsics_vec256 v1_10 = v1_1; - Lib_IntVector_Intrinsics_vec256 v2_10 = v2_1; - Lib_IntVector_Intrinsics_vec256 v3_10 = v3_1; - Lib_IntVector_Intrinsics_vec256 v4_10 = v4_1; - Lib_IntVector_Intrinsics_vec256 v5_10 = v5_1; - Lib_IntVector_Intrinsics_vec256 v6_10 = v6_1; - Lib_IntVector_Intrinsics_vec256 v7_10 = v7_1; - Lib_IntVector_Intrinsics_vec256 - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v4_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v4_10); - Lib_IntVector_Intrinsics_vec256 - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v5_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v5_10); - Lib_IntVector_Intrinsics_vec256 - v2_2 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v6_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, v6_10); - Lib_IntVector_Intrinsics_vec256 - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 - v7_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, v7_10); - Lib_IntVector_Intrinsics_vec256 v0_20 = v0_2; - Lib_IntVector_Intrinsics_vec256 v1_20 = v1_2; - Lib_IntVector_Intrinsics_vec256 v2_20 = v2_2; - Lib_IntVector_Intrinsics_vec256 v3_20 = v3_2; - Lib_IntVector_Intrinsics_vec256 v4_20 = v4_2; - Lib_IntVector_Intrinsics_vec256 v5_20 = v5_2; - Lib_IntVector_Intrinsics_vec256 v6_20 = v6_2; - Lib_IntVector_Intrinsics_vec256 v7_20 = v7_2; - Lib_IntVector_Intrinsics_vec256 v0_3 = v0_20; - Lib_IntVector_Intrinsics_vec256 v1_3 = v1_20; - Lib_IntVector_Intrinsics_vec256 v2_3 = v2_20; - Lib_IntVector_Intrinsics_vec256 v3_3 = v3_20; - Lib_IntVector_Intrinsics_vec256 v4_3 = v4_20; - Lib_IntVector_Intrinsics_vec256 v5_3 = v5_20; - Lib_IntVector_Intrinsics_vec256 v6_3 = v6_20; - Lib_IntVector_Intrinsics_vec256 v7_3 = v7_20; - Lib_IntVector_Intrinsics_vec256 v0 = v0_3; - Lib_IntVector_Intrinsics_vec256 v1 = v2_3; - Lib_IntVector_Intrinsics_vec256 v2 = v1_3; - Lib_IntVector_Intrinsics_vec256 v3 = v3_3; - Lib_IntVector_Intrinsics_vec256 v4 = v4_3; - Lib_IntVector_Intrinsics_vec256 v5 = v6_3; - Lib_IntVector_Intrinsics_vec256 v6 = v5_3; - Lib_IntVector_Intrinsics_vec256 v7 = v7_3; - Lib_IntVector_Intrinsics_vec256 v01 = st8; - Lib_IntVector_Intrinsics_vec256 v110 = st9; - Lib_IntVector_Intrinsics_vec256 v21 = st10; - Lib_IntVector_Intrinsics_vec256 v31 = st11; - Lib_IntVector_Intrinsics_vec256 v41 = st12; - Lib_IntVector_Intrinsics_vec256 v51 = st13; - Lib_IntVector_Intrinsics_vec256 v61 = st14; - Lib_IntVector_Intrinsics_vec256 v71 = st15; - Lib_IntVector_Intrinsics_vec256 - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v01, v110); - Lib_IntVector_Intrinsics_vec256 - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v21, v31); - Lib_IntVector_Intrinsics_vec256 - v4_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v41, v51); - Lib_IntVector_Intrinsics_vec256 - v5_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v41, v51); - Lib_IntVector_Intrinsics_vec256 - v6_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v61, v71); - Lib_IntVector_Intrinsics_vec256 - v7_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v61, v71); - Lib_IntVector_Intrinsics_vec256 v0_5 = v0_4; - Lib_IntVector_Intrinsics_vec256 v1_5 = v1_4; - Lib_IntVector_Intrinsics_vec256 v2_5 = v2_4; - Lib_IntVector_Intrinsics_vec256 v3_5 = v3_4; - Lib_IntVector_Intrinsics_vec256 v4_5 = v4_4; - Lib_IntVector_Intrinsics_vec256 v5_5 = v5_4; - Lib_IntVector_Intrinsics_vec256 v6_5 = v6_4; - Lib_IntVector_Intrinsics_vec256 v7_5 = v7_4; - Lib_IntVector_Intrinsics_vec256 - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_5, v2_5); - Lib_IntVector_Intrinsics_vec256 - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_5, v3_5); - Lib_IntVector_Intrinsics_vec256 - 
v4_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v6_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_5, v6_5); - Lib_IntVector_Intrinsics_vec256 - v5_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 - v7_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_5, v7_5); - Lib_IntVector_Intrinsics_vec256 v0_12 = v0_11; - Lib_IntVector_Intrinsics_vec256 v1_12 = v1_11; - Lib_IntVector_Intrinsics_vec256 v2_12 = v2_11; - Lib_IntVector_Intrinsics_vec256 v3_12 = v3_11; - Lib_IntVector_Intrinsics_vec256 v4_12 = v4_11; - Lib_IntVector_Intrinsics_vec256 v5_12 = v5_11; - Lib_IntVector_Intrinsics_vec256 v6_12 = v6_11; - Lib_IntVector_Intrinsics_vec256 v7_12 = v7_11; - Lib_IntVector_Intrinsics_vec256 - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v4_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v4_12); - Lib_IntVector_Intrinsics_vec256 - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v5_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v5_12); - Lib_IntVector_Intrinsics_vec256 - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v6_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_12, v6_12); - Lib_IntVector_Intrinsics_vec256 - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 - v7_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_12, v7_12); - Lib_IntVector_Intrinsics_vec256 v0_22 = v0_21; - Lib_IntVector_Intrinsics_vec256 v1_22 = v1_21; - Lib_IntVector_Intrinsics_vec256 v2_22 = v2_21; - Lib_IntVector_Intrinsics_vec256 v3_22 = v3_21; - Lib_IntVector_Intrinsics_vec256 v4_22 = v4_21; - Lib_IntVector_Intrinsics_vec256 v5_22 = v5_21; - Lib_IntVector_Intrinsics_vec256 v6_22 = v6_21; - Lib_IntVector_Intrinsics_vec256 v7_22 = v7_21; - Lib_IntVector_Intrinsics_vec256 v0_6 = v0_22; - Lib_IntVector_Intrinsics_vec256 v1_6 = v1_22; - Lib_IntVector_Intrinsics_vec256 v2_6 = v2_22; - Lib_IntVector_Intrinsics_vec256 v3_6 = v3_22; - Lib_IntVector_Intrinsics_vec256 v4_6 = v4_22; - Lib_IntVector_Intrinsics_vec256 v5_6 = v5_22; - Lib_IntVector_Intrinsics_vec256 v6_6 = v6_22; - Lib_IntVector_Intrinsics_vec256 v7_6 = v7_22; - Lib_IntVector_Intrinsics_vec256 v8 = v0_6; - Lib_IntVector_Intrinsics_vec256 v9 = v2_6; - Lib_IntVector_Intrinsics_vec256 v10 = v1_6; - Lib_IntVector_Intrinsics_vec256 v11 = v3_6; - Lib_IntVector_Intrinsics_vec256 v12 = v4_6; - Lib_IntVector_Intrinsics_vec256 v13 = v6_6; - Lib_IntVector_Intrinsics_vec256 v14 = v5_6; - Lib_IntVector_Intrinsics_vec256 v15 = v7_6; - k[0U] = v0; - k[1U] = v8; - k[2U] = v1; - k[3U] = v9; - k[4U] = v2; - k[5U] = v10; - k[6U] = v3; - k[7U] = v11; - k[8U] = v4; - k[9U] = v12; - k[10U] = v5; - k[11U] = v13; - k[12U] = v6; - k[13U] = v14; - k[14U] = v7; - k[15U] = v15; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]); - Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_Chacha20_Vec256.h b/dist/c89-compatible/Hacl_Chacha20_Vec256.h deleted 
file mode 100644 index 0400e39cfa..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20_Vec256.h +++ /dev/null @@ -1,65 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Chacha20_Vec256_H -#define __Hacl_Chacha20_Vec256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void -Hacl_Chacha20_Vec256_chacha20_encrypt_256( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -); - -void -Hacl_Chacha20_Vec256_chacha20_decrypt_256( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Chacha20_Vec256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Chacha20_Vec32.c b/dist/c89-compatible/Hacl_Chacha20_Vec32.c deleted file mode 100644 index 64e51dde8f..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20_Vec32.c +++ /dev/null @@ -1,349 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "Hacl_Chacha20_Vec32.h" - -#include "internal/Hacl_Chacha20.h" - -static inline void double_round_32(uint32_t *st) -{ - uint32_t std0; - uint32_t std1; - uint32_t std2; - uint32_t std3; - uint32_t std4; - uint32_t std5; - uint32_t std6; - uint32_t std7; - uint32_t std8; - uint32_t std9; - uint32_t std10; - uint32_t std11; - uint32_t std12; - uint32_t std13; - uint32_t std14; - uint32_t std15; - uint32_t std16; - uint32_t std17; - uint32_t std18; - uint32_t std19; - uint32_t std20; - uint32_t std21; - uint32_t std22; - uint32_t std23; - uint32_t std24; - uint32_t std25; - uint32_t std26; - uint32_t std27; - uint32_t std28; - uint32_t std29; - uint32_t std30; - uint32_t std; - st[0U] = st[0U] + st[4U]; - std0 = st[12U] ^ st[0U]; - st[12U] = std0 << (uint32_t)16U | std0 >> (uint32_t)16U; - st[8U] = st[8U] + st[12U]; - std1 = st[4U] ^ st[8U]; - st[4U] = std1 << (uint32_t)12U | std1 >> (uint32_t)20U; - st[0U] = st[0U] + st[4U]; - std2 = st[12U] ^ st[0U]; - st[12U] = std2 << (uint32_t)8U | std2 >> (uint32_t)24U; - st[8U] = st[8U] + st[12U]; - std3 = st[4U] ^ st[8U]; - st[4U] = std3 << (uint32_t)7U | std3 >> (uint32_t)25U; - st[1U] = st[1U] + st[5U]; - std4 = st[13U] ^ st[1U]; - st[13U] = std4 << (uint32_t)16U | std4 >> (uint32_t)16U; - st[9U] = st[9U] + st[13U]; - std5 = st[5U] ^ st[9U]; - st[5U] = std5 << (uint32_t)12U | std5 >> (uint32_t)20U; - st[1U] = st[1U] + st[5U]; - std6 = st[13U] ^ st[1U]; - st[13U] = std6 << (uint32_t)8U | std6 >> (uint32_t)24U; - st[9U] = st[9U] + st[13U]; - std7 = st[5U] ^ st[9U]; - st[5U] = std7 << (uint32_t)7U | std7 >> (uint32_t)25U; - st[2U] = st[2U] + st[6U]; - std8 = st[14U] ^ st[2U]; - st[14U] = std8 << (uint32_t)16U | std8 >> (uint32_t)16U; - st[10U] = st[10U] + st[14U]; - std9 = st[6U] ^ st[10U]; - st[6U] = std9 << (uint32_t)12U | std9 >> (uint32_t)20U; - st[2U] = st[2U] + st[6U]; - std10 = st[14U] ^ st[2U]; - st[14U] = std10 << (uint32_t)8U | std10 >> (uint32_t)24U; - st[10U] = st[10U] + st[14U]; - std11 = st[6U] ^ st[10U]; - st[6U] = std11 << (uint32_t)7U | std11 >> (uint32_t)25U; - st[3U] = st[3U] + st[7U]; - std12 = st[15U] ^ st[3U]; - st[15U] = std12 << (uint32_t)16U | std12 >> (uint32_t)16U; - st[11U] = st[11U] + st[15U]; - std13 = st[7U] ^ st[11U]; - st[7U] = std13 << (uint32_t)12U | std13 >> (uint32_t)20U; - st[3U] = st[3U] + st[7U]; - std14 = st[15U] ^ st[3U]; - st[15U] = std14 << (uint32_t)8U | std14 >> (uint32_t)24U; - st[11U] = st[11U] + st[15U]; - std15 = st[7U] ^ st[11U]; - st[7U] = std15 << (uint32_t)7U | std15 >> (uint32_t)25U; - st[0U] = st[0U] + st[5U]; - std16 = st[15U] ^ st[0U]; - st[15U] = std16 << (uint32_t)16U | std16 >> (uint32_t)16U; - st[10U] = st[10U] + st[15U]; - std17 = st[5U] ^ st[10U]; - st[5U] = std17 << (uint32_t)12U | std17 >> (uint32_t)20U; - st[0U] = st[0U] + st[5U]; - std18 = st[15U] ^ st[0U]; - st[15U] = std18 << (uint32_t)8U | std18 >> (uint32_t)24U; - st[10U] = st[10U] + st[15U]; - std19 = st[5U] ^ st[10U]; - st[5U] = std19 << (uint32_t)7U | std19 >> (uint32_t)25U; - st[1U] = st[1U] + st[6U]; - std20 = st[12U] ^ st[1U]; - st[12U] = std20 << (uint32_t)16U | std20 >> (uint32_t)16U; - st[11U] = st[11U] + st[12U]; - std21 = st[6U] ^ st[11U]; - st[6U] = std21 << (uint32_t)12U | std21 >> (uint32_t)20U; - st[1U] = st[1U] + st[6U]; - std22 = st[12U] ^ st[1U]; - st[12U] = std22 << (uint32_t)8U | std22 >> (uint32_t)24U; - st[11U] = st[11U] + st[12U]; - std23 = st[6U] ^ st[11U]; - st[6U] = std23 << (uint32_t)7U | std23 >> (uint32_t)25U; - st[2U] = st[2U] + st[7U]; - std24 = st[13U] ^ st[2U]; - st[13U] = std24 
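/* Scalar double round. Each group of four lines is one step of the ChaCha20
   quarter-round on state words (a, b, c, d):
       a += b; d ^= a; d <<<= 16;
       c += d; b ^= c; b <<<= 12;
       a += b; d ^= a; d <<<=  8;
       c += d; b ^= c; b <<<=  7;
   The first four quarter-rounds operate on the columns (0,4,8,12) ...
   (3,7,11,15), the next four on the diagonals (0,5,10,15) ... (3,4,9,14).
   The rotate-left is spelled as the portable shift-or idiom since C has no
   rotate operator. */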
<< (uint32_t)16U | std24 >> (uint32_t)16U; - st[8U] = st[8U] + st[13U]; - std25 = st[7U] ^ st[8U]; - st[7U] = std25 << (uint32_t)12U | std25 >> (uint32_t)20U; - st[2U] = st[2U] + st[7U]; - std26 = st[13U] ^ st[2U]; - st[13U] = std26 << (uint32_t)8U | std26 >> (uint32_t)24U; - st[8U] = st[8U] + st[13U]; - std27 = st[7U] ^ st[8U]; - st[7U] = std27 << (uint32_t)7U | std27 >> (uint32_t)25U; - st[3U] = st[3U] + st[4U]; - std28 = st[14U] ^ st[3U]; - st[14U] = std28 << (uint32_t)16U | std28 >> (uint32_t)16U; - st[9U] = st[9U] + st[14U]; - std29 = st[4U] ^ st[9U]; - st[4U] = std29 << (uint32_t)12U | std29 >> (uint32_t)20U; - st[3U] = st[3U] + st[4U]; - std30 = st[14U] ^ st[3U]; - st[14U] = std30 << (uint32_t)8U | std30 >> (uint32_t)24U; - st[9U] = st[9U] + st[14U]; - std = st[4U] ^ st[9U]; - st[4U] = std << (uint32_t)7U | std >> (uint32_t)25U; -} - -static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr) -{ - uint32_t ctr_u32; - uint32_t cv; - memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t)); - ctr_u32 = (uint32_t)1U * ctr; - cv = ctr_u32; - k[12U] = k[12U] + cv; - double_round_32(k); - double_round_32(k); - double_round_32(k); - double_round_32(k); - double_round_32(k); - double_round_32(k); - double_round_32(k); - double_round_32(k); - double_round_32(k); - double_round_32(k); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = k; - uint32_t x = k[i] + ctx[i]; - os[i] = x;); - k[12U] = k[12U] + cv; -} - -static inline void chacha20_init_32(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr) -{ - uint32_t ctx1[16U] = { 0U }; - uint32_t ctr1; - uint32_t c12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = ctx1; - uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i]; - os[i] = x;); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = ctx1 + (uint32_t)4U; - uint8_t *bj = k + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - ctx1[12U] = ctr; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint32_t *os = ctx1 + (uint32_t)13U; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = ctx; - uint32_t x = ctx1[i]; - os[i] = x;); - ctr1 = (uint32_t)0U; - c12 = ctx[12U]; - ctx[12U] = c12 + ctr1; -} - -void -Hacl_Chacha20_Vec32_chacha20_encrypt_32( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - uint32_t ctx[16U] = { 0U }; - uint32_t rem; - uint32_t nb; - uint32_t rem1; - chacha20_init_32(ctx, key, n, ctr); - rem = len % (uint32_t)64U; - nb = len / (uint32_t)64U; - rem1 = len % (uint32_t)64U; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < nb; i0++) - { - uint8_t *uu____0 = out + i0 * (uint32_t)64U; - uint8_t *uu____1 = text + i0 * (uint32_t)64U; - uint32_t k[16U] = { 0U }; - chacha20_core_32(k, ctx, i0); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t u = load32_le(uu____1 + i * (uint32_t)4U); - uint32_t x = u; - uint32_t y = x ^ k[i]; - store32_le(uu____0 + i * (uint32_t)4U, y);); - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = text + nb * (uint32_t)64U; - uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - uint32_t k[16U] = { 0U }; - chacha20_core_32(k, ctx, nb); - 
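/* The portable scalar path works one 64-byte block at a time.
   chacha20_core_32 copies the 16-word state, adds the block counter into
   word 12, runs ten double rounds (20 rounds total), then adds the initial
   state back; the counter offset `cv` is added again after the feed-forward
   so word 12 reflects the per-block counter. Below, the zero-padded tail
   block is XORed word-by-word via load32_le/store32_le to stay
   endianness-correct. */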
KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t u = load32_le(plain + i * (uint32_t)4U); - uint32_t x = u; - uint32_t y = x ^ k[i]; - store32_le(plain + i * (uint32_t)4U, y);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } -} - -void -Hacl_Chacha20_Vec32_chacha20_decrypt_32( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - uint32_t ctx[16U] = { 0U }; - uint32_t rem; - uint32_t nb; - uint32_t rem1; - chacha20_init_32(ctx, key, n, ctr); - rem = len % (uint32_t)64U; - nb = len / (uint32_t)64U; - rem1 = len % (uint32_t)64U; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < nb; i0++) - { - uint8_t *uu____0 = out + i0 * (uint32_t)64U; - uint8_t *uu____1 = cipher + i0 * (uint32_t)64U; - uint32_t k[16U] = { 0U }; - chacha20_core_32(k, ctx, i0); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t u = load32_le(uu____1 + i * (uint32_t)4U); - uint32_t x = u; - uint32_t y = x ^ k[i]; - store32_le(uu____0 + i * (uint32_t)4U, y);); - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = cipher + nb * (uint32_t)64U; - uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - uint32_t k[16U] = { 0U }; - chacha20_core_32(k, ctx, nb); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t u = load32_le(plain + i * (uint32_t)4U); - uint32_t x = u; - uint32_t y = x ^ k[i]; - store32_le(plain + i * (uint32_t)4U, y);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } -} - diff --git a/dist/c89-compatible/Hacl_Chacha20_Vec32.h b/dist/c89-compatible/Hacl_Chacha20_Vec32.h deleted file mode 100644 index b0ed73473b..0000000000 --- a/dist/c89-compatible/Hacl_Chacha20_Vec32.h +++ /dev/null @@ -1,65 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Chacha20_Vec32_H -#define __Hacl_Chacha20_Vec32_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void -Hacl_Chacha20_Vec32_chacha20_encrypt_32( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -); - -void -Hacl_Chacha20_Vec32_chacha20_decrypt_32( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Chacha20_Vec32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Curve25519_51.c b/dist/c89-compatible/Hacl_Curve25519_51.c deleted file mode 100644 index b399087087..0000000000 --- a/dist/c89-compatible/Hacl_Curve25519_51.c +++ /dev/null @@ -1,361 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "internal/Hacl_Curve25519_51.h" - - - -static const uint8_t g25519[32U] = { (uint8_t)9U }; - -static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2) -{ - uint64_t *nq = p01_tmp1; - uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U; - uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U; - uint64_t *x1 = q; - uint64_t *x2 = nq; - uint64_t *z2 = nq + (uint32_t)5U; - uint64_t *z3 = nq_p1 + (uint32_t)5U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + (uint32_t)5U; - uint64_t *ab = tmp1; - uint64_t *dc = tmp1 + (uint32_t)10U; - uint64_t *x3; - uint64_t *z31; - uint64_t *d0; - uint64_t *c0; - uint64_t *a1; - uint64_t *b1; - uint64_t *d; - uint64_t *c; - uint64_t *ab1; - uint64_t *dc1; - Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2); - Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2); - x3 = nq_p1; - z31 = nq_p1 + (uint32_t)5U; - d0 = dc; - c0 = dc + (uint32_t)5U; - Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31); - Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31); - Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2); - Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0); - Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0); - a1 = tmp1; - b1 = tmp1 + (uint32_t)5U; - d = tmp1 + (uint32_t)10U; - c = tmp1 + (uint32_t)15U; - ab1 = tmp1; - dc1 = tmp1 + (uint32_t)10U; - Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2); - Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2); - a1[0U] = c[0U]; - a1[1U] = c[1U]; - a1[2U] = c[2U]; - a1[3U] = c[3U]; - a1[4U] = c[4U]; - Hacl_Impl_Curve25519_Field51_fsub(c, d, c); - Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U); - Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d); - Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2); - Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2); -} - -static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2) -{ - uint64_t *x2 = nq; - uint64_t *z2 = nq + (uint32_t)5U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + (uint32_t)5U; - uint64_t *d = tmp1 + (uint32_t)10U; - uint64_t *c = tmp1 + (uint32_t)15U; - uint64_t *ab = tmp1; - uint64_t *dc = tmp1 + (uint32_t)10U; - Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2); - Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2); - Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2); - a[0U] = c[0U]; - a[1U] = c[1U]; - a[2U] = c[2U]; - a[3U] = c[3U]; - a[4U] = c[4U]; - Hacl_Impl_Curve25519_Field51_fsub(c, d, c); - Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U); - Hacl_Impl_Curve25519_Field51_fadd(b, b, d); - Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2); -} - -static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) -{ - FStar_UInt128_uint128 tmp2[10U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)10U; ++_i) - tmp2[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - { - uint64_t p01_tmp1_swap[41U] = { 0U }; - uint64_t *p0 = p01_tmp1_swap; - uint64_t *p01 = p01_tmp1_swap; - uint64_t *p03 = p01; - uint64_t *p11 = p01 + (uint32_t)10U; - uint64_t *x0; - uint64_t *z0; - uint64_t *p01_tmp1; - uint64_t *p01_tmp11; - uint64_t *nq10; - uint64_t *nq_p11; - uint64_t *swap; - uint64_t sw0; - uint64_t *nq1; - uint64_t *tmp1; - memcpy(p11, init, (uint32_t)10U * sizeof (uint64_t)); - x0 = p03; - z0 = p03 + (uint32_t)5U; - x0[0U] = (uint64_t)1U; - x0[1U] = (uint64_t)0U; - x0[2U] = (uint64_t)0U; - x0[3U] = (uint64_t)0U; - x0[4U] = (uint64_t)0U; - z0[0U] = (uint64_t)0U; - z0[1U] = (uint64_t)0U; - z0[2U] = (uint64_t)0U; - z0[3U] = (uint64_t)0U; - z0[4U] = (uint64_t)0U; - p01_tmp1 = p01_tmp1_swap; - p01_tmp11 = 
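/* Montgomery ladder over the 41-word scratch area p01_tmp1_swap: words 0-9
   are nq = (X2 : Z2), words 10-19 are nq_p1 = (X3 : Z3), words 20-39 are
   tmp1, and word 40 holds the running swap bit. Field elements use five
   51-bit limbs (radix 2^51), and p0 starts at (1 : 0), the ladder's neutral
   point. The loop below consumes scalar bits 253 down to 3 with
   constant-time cswap2; the hard-coded first swap and the three trailing
   point_double calls presumably rely on the standard X25519 clamping of the
   scalar (bit 254 set, bits 0-2 clear). */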
p01_tmp1_swap; - nq10 = p01_tmp1_swap; - nq_p11 = p01_tmp1_swap + (uint32_t)10U; - swap = p01_tmp1_swap + (uint32_t)40U; - Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq10, nq_p11); - point_add_and_double(init, p01_tmp11, tmp2); - swap[0U] = (uint64_t)1U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)251U; i++) - { - uint64_t *p01_tmp12 = p01_tmp1_swap; - uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U; - uint64_t *nq2 = p01_tmp12; - uint64_t *nq_p12 = p01_tmp12 + (uint32_t)10U; - uint64_t - bit = - (uint64_t)(key[((uint32_t)253U - i) - / (uint32_t)8U] - >> ((uint32_t)253U - i) % (uint32_t)8U - & (uint8_t)1U); - uint64_t sw = swap1[0U] ^ bit; - Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12); - point_add_and_double(init, p01_tmp12, tmp2); - swap1[0U] = bit; - } - } - sw0 = swap[0U]; - Hacl_Impl_Curve25519_Field51_cswap2(sw0, nq10, nq_p11); - nq1 = p01_tmp1; - tmp1 = p01_tmp1 + (uint32_t)20U; - point_double(nq1, tmp1, tmp2); - point_double(nq1, tmp1, tmp2); - point_double(nq1, tmp1, tmp2); - memcpy(out, p0, (uint32_t)10U * sizeof (uint64_t)); - } -} - -void -Hacl_Curve25519_51_fsquare_times( - uint64_t *o, - uint64_t *inp, - FStar_UInt128_uint128 *tmp, - uint32_t n -) -{ - uint32_t i; - Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp); - for (i = (uint32_t)0U; i < n - (uint32_t)1U; i++) - { - Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp); - } -} - -void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp) -{ - uint64_t t1[20U] = { 0U }; - uint64_t *a1 = t1; - uint64_t *b10 = t1 + (uint32_t)5U; - uint64_t *t010 = t1 + (uint32_t)15U; - FStar_UInt128_uint128 *tmp10 = tmp; - uint64_t *b11; - uint64_t *c10; - uint64_t *t011; - FStar_UInt128_uint128 *tmp11; - uint64_t *b1; - uint64_t *c1; - uint64_t *t01; - FStar_UInt128_uint128 *tmp1; - uint64_t *a; - uint64_t *t0; - Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, (uint32_t)1U); - Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)2U); - Hacl_Impl_Curve25519_Field51_fmul(b10, t010, i, tmp); - Hacl_Impl_Curve25519_Field51_fmul(a1, b10, a1, tmp); - Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)1U); - Hacl_Impl_Curve25519_Field51_fmul(b10, t010, b10, tmp); - Hacl_Curve25519_51_fsquare_times(t010, b10, tmp10, (uint32_t)5U); - Hacl_Impl_Curve25519_Field51_fmul(b10, t010, b10, tmp); - b11 = t1 + (uint32_t)5U; - c10 = t1 + (uint32_t)10U; - t011 = t1 + (uint32_t)15U; - tmp11 = tmp; - Hacl_Curve25519_51_fsquare_times(t011, b11, tmp11, (uint32_t)10U); - Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b11, tmp); - Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, (uint32_t)20U); - Hacl_Impl_Curve25519_Field51_fmul(t011, t011, c10, tmp); - Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, (uint32_t)10U); - Hacl_Impl_Curve25519_Field51_fmul(b11, t011, b11, tmp); - Hacl_Curve25519_51_fsquare_times(t011, b11, tmp11, (uint32_t)50U); - Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b11, tmp); - b1 = t1 + (uint32_t)5U; - c1 = t1 + (uint32_t)10U; - t01 = t1 + (uint32_t)15U; - tmp1 = tmp; - Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, (uint32_t)100U); - Hacl_Impl_Curve25519_Field51_fmul(t01, t01, c1, tmp); - Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)50U); - Hacl_Impl_Curve25519_Field51_fmul(t01, t01, b1, tmp); - Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)5U); - a = t1; - t0 = t1 + (uint32_t)15U; - Hacl_Impl_Curve25519_Field51_fmul(o, t0, a, tmp); -} - -static void encode_point(uint8_t *o, uint64_t *i) -{ - uint64_t *x = i; - uint64_t *z = i + (uint32_t)5U; - uint64_t 
tmp[5U] = { 0U }; - uint64_t u64s[4U] = { 0U }; - FStar_UInt128_uint128 tmp_w[10U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)10U; ++_i) - tmp_w[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - Hacl_Curve25519_51_finv(tmp, z, tmp_w); - Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w); - Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(o + i0 * (uint32_t)8U, u64s[i0]);); -} - -void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub) -{ - uint64_t init[10U] = { 0U }; - uint64_t tmp[4U] = { 0U }; - uint64_t tmp3; - uint64_t *x; - uint64_t *z; - uint64_t f0l; - uint64_t f0h; - uint64_t f1l; - uint64_t f1h; - uint64_t f2l; - uint64_t f2h; - uint64_t f3l; - uint64_t f3h; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = tmp; - uint8_t *bj = pub + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x0 = r; - os[i] = x0;); - tmp3 = tmp[3U]; - tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU; - x = init; - z = init + (uint32_t)5U; - z[0U] = (uint64_t)1U; - z[1U] = (uint64_t)0U; - z[2U] = (uint64_t)0U; - z[3U] = (uint64_t)0U; - z[4U] = (uint64_t)0U; - f0l = tmp[0U] & (uint64_t)0x7ffffffffffffU; - f0h = tmp[0U] >> (uint32_t)51U; - f1l = (tmp[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U; - f1h = tmp[1U] >> (uint32_t)38U; - f2l = (tmp[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U; - f2h = tmp[2U] >> (uint32_t)25U; - f3l = (tmp[3U] & (uint64_t)0xfffU) << (uint32_t)39U; - f3h = tmp[3U] >> (uint32_t)12U; - x[0U] = f0l; - x[1U] = f0h | f1l; - x[2U] = f1h | f2l; - x[3U] = f2h | f3l; - x[4U] = f3h; - montgomery_ladder(init, priv, init); - encode_point(out, init); -} - -void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv) -{ - uint8_t basepoint[32U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t *os = basepoint; - uint8_t x = g25519[i]; - os[i] = x; - } - } - Hacl_Curve25519_51_scalarmult(pub, priv, basepoint); -} - -bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub) -{ - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(out, priv, pub); - { - uint8_t res = (uint8_t)255U; - uint8_t z; - bool r; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]); - res = uu____0 & res; - } - } - z = res; - r = z == (uint8_t)255U; - return !r; - } -} - diff --git a/dist/c89-compatible/Hacl_Curve25519_51.h b/dist/c89-compatible/Hacl_Curve25519_51.h deleted file mode 100644 index 06c432d971..0000000000 --- a/dist/c89-compatible/Hacl_Curve25519_51.h +++ /dev/null @@ -1,52 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
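The `Hacl_Curve25519_51_ecdh` body above rejects an all-zero shared secret (the result of multiplying a low-order point) and does so in constant time: `FStar_UInt8_eq_mask(x, y)` is 0xFF when x equals y and 0x00 otherwise, so `res` stays at 0xFF only if every output byte is zero, in which case the function returns false. A self-contained sketch of that mask-accumulation pattern (the helper name and the inline mask expression are illustrative, not library code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true iff buf[0..31] is all zero, without secret-dependent branches. */
    static bool ct_is_all_zero(const uint8_t *buf)
    {
      uint8_t res = (uint8_t)255U;
      uint32_t i;
      for (i = (uint32_t)0U; i < (uint32_t)32U; i++)
      {
        /* mask is 0xFF when buf[i] == 0 and 0x00 otherwise */
        uint8_t mask = (uint8_t)(((uint32_t)buf[i] - 1U) >> 24U);
        res = (uint8_t)(mask & res);
      }
      return res == (uint8_t)255U;
    }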
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Curve25519_51_H -#define __Hacl_Curve25519_51_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Bignum25519_51.h" -#include "evercrypt_targetconfig.h" -void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub); - -void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv); - -bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Curve25519_51_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Curve25519_64.c b/dist/c89-compatible/Hacl_Curve25519_64.c deleted file mode 100644 index 0b8c33f4c7..0000000000 --- a/dist/c89-compatible/Hacl_Curve25519_64.c +++ /dev/null @@ -1,454 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
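Taken together, the three declarations in the Hacl_Curve25519_51 header above are the usual X25519 (RFC 7748) surface. A sketch of a full key exchange against this API (`fill_random` stands in for a real CSPRNG and is not part of the library):

    #include <stdbool.h>
    #include <stdint.h>
    #include "Hacl_Curve25519_51.h"

    extern void fill_random(uint8_t *buf, uint32_t len); /* placeholder CSPRNG */

    static bool demo_exchange(void)
    {
      uint8_t a_priv[32U], a_pub[32U];
      uint8_t b_priv[32U], b_pub[32U];
      uint8_t s1[32U], s2[32U];
      bool ok1, ok2;
      fill_random(a_priv, (uint32_t)32U);
      fill_random(b_priv, (uint32_t)32U);
      Hacl_Curve25519_51_secret_to_public(a_pub, a_priv);
      Hacl_Curve25519_51_secret_to_public(b_pub, b_priv);
      ok1 = Hacl_Curve25519_51_ecdh(s1, a_priv, b_pub);
      ok2 = Hacl_Curve25519_51_ecdh(s2, b_priv, a_pub);
      /* on success s1 == s2; false means an all-zero secret that must be rejected */
      return ok1 && ok2;
    }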
- */ - - -#include "Hacl_Curve25519_64.h" - -#include "internal/Vale.h" -#include "curve25519-inline.h" -static inline void add_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - add_scalar(out, f1, f2); - #else - uint64_t uu____0 = add_scalar_e(out, f1, f2); - #endif -} - -static inline void fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - fadd(out, f1, f2); - #else - uint64_t uu____0 = fadd_e(out, f1, f2); - #endif -} - -static inline void fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - fsub(out, f1, f2); - #else - uint64_t uu____0 = fsub_e(out, f1, f2); - #endif -} - -static inline void fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tmp) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - fmul(out, f1, f2, tmp); - #else - uint64_t uu____0 = fmul_e(tmp, f1, out, f2); - #endif -} - -static inline void fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tmp) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - fmul2(out, f1, f2, tmp); - #else - uint64_t uu____0 = fmul2_e(tmp, f1, out, f2); - #endif -} - -static inline void fmul_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - fmul_scalar(out, f1, f2); - #else - uint64_t uu____0 = fmul_scalar_e(out, f1, f2); - #endif -} - -static inline void fsqr0(uint64_t *out, uint64_t *f1, uint64_t *tmp) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - fsqr(out, f1, tmp); - #else - uint64_t uu____0 = fsqr_e(tmp, f1, out); - #endif -} - -static inline void fsqr20(uint64_t *out, uint64_t *f, uint64_t *tmp) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - fsqr2(out, f, tmp); - #else - uint64_t uu____0 = fsqr2_e(tmp, f, out); - #endif -} - -static inline void cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2) -{ - #if HACL_CAN_COMPILE_INLINE_ASM - cswap2(bit, p1, p2); - #else - uint64_t uu____0 = cswap2_e(bit, p1, p2); - #endif -} - -static const uint8_t g25519[32U] = { (uint8_t)9U }; - -static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2) -{ - uint64_t *nq = p01_tmp1; - uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U; - uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U; - uint64_t *x1 = q; - uint64_t *x2 = nq; - uint64_t *z2 = nq + (uint32_t)4U; - uint64_t *z3 = nq_p1 + (uint32_t)4U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + (uint32_t)4U; - uint64_t *ab = tmp1; - uint64_t *dc = tmp1 + (uint32_t)8U; - uint64_t *x3; - uint64_t *z31; - uint64_t *d0; - uint64_t *c0; - uint64_t *a1; - uint64_t *b1; - uint64_t *d; - uint64_t *c; - uint64_t *ab1; - uint64_t *dc1; - fadd0(a, x2, z2); - fsub0(b, x2, z2); - x3 = nq_p1; - z31 = nq_p1 + (uint32_t)4U; - d0 = dc; - c0 = dc + (uint32_t)4U; - fadd0(c0, x3, z31); - fsub0(d0, x3, z31); - fmul20(dc, dc, ab, tmp2); - fadd0(x3, d0, c0); - fsub0(z31, d0, c0); - a1 = tmp1; - b1 = tmp1 + (uint32_t)4U; - d = tmp1 + (uint32_t)8U; - c = tmp1 + (uint32_t)12U; - ab1 = tmp1; - dc1 = tmp1 + (uint32_t)8U; - fsqr20(dc1, ab1, tmp2); - fsqr20(nq_p1, nq_p1, tmp2); - a1[0U] = c[0U]; - a1[1U] = c[1U]; - a1[2U] = c[2U]; - a1[3U] = c[3U]; - fsub0(c, d, c); - fmul_scalar0(b1, c, (uint64_t)121665U); - fadd0(b1, b1, d); - fmul20(nq, dc1, ab1, tmp2); - fmul0(z3, z3, x1, tmp2); -} - -static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2) -{ - uint64_t *x2 = nq; - uint64_t *z2 = nq + (uint32_t)4U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + (uint32_t)4U; - uint64_t *d = tmp1 + (uint32_t)8U; - uint64_t *c = tmp1 + (uint32_t)12U; - uint64_t *ab = tmp1; - uint64_t *dc = tmp1 + (uint32_t)8U; - 
fadd0(a, x2, z2); - fsub0(b, x2, z2); - fsqr20(dc, ab, tmp2); - a[0U] = c[0U]; - a[1U] = c[1U]; - a[2U] = c[2U]; - a[3U] = c[3U]; - fsub0(c, d, c); - fmul_scalar0(b, c, (uint64_t)121665U); - fadd0(b, b, d); - fmul20(nq, dc, ab, tmp2); -} - -static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) -{ - uint64_t tmp2[16U] = { 0U }; - uint64_t p01_tmp1_swap[33U] = { 0U }; - uint64_t *p0 = p01_tmp1_swap; - uint64_t *p01 = p01_tmp1_swap; - uint64_t *p03 = p01; - uint64_t *p11 = p01 + (uint32_t)8U; - uint64_t *x0; - uint64_t *z0; - uint64_t *p01_tmp1; - uint64_t *p01_tmp11; - uint64_t *nq10; - uint64_t *nq_p11; - uint64_t *swap; - uint64_t sw0; - uint64_t *nq1; - uint64_t *tmp1; - memcpy(p11, init, (uint32_t)8U * sizeof (uint64_t)); - x0 = p03; - z0 = p03 + (uint32_t)4U; - x0[0U] = (uint64_t)1U; - x0[1U] = (uint64_t)0U; - x0[2U] = (uint64_t)0U; - x0[3U] = (uint64_t)0U; - z0[0U] = (uint64_t)0U; - z0[1U] = (uint64_t)0U; - z0[2U] = (uint64_t)0U; - z0[3U] = (uint64_t)0U; - p01_tmp1 = p01_tmp1_swap; - p01_tmp11 = p01_tmp1_swap; - nq10 = p01_tmp1_swap; - nq_p11 = p01_tmp1_swap + (uint32_t)8U; - swap = p01_tmp1_swap + (uint32_t)32U; - cswap20((uint64_t)1U, nq10, nq_p11); - point_add_and_double(init, p01_tmp11, tmp2); - swap[0U] = (uint64_t)1U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)251U; i++) - { - uint64_t *p01_tmp12 = p01_tmp1_swap; - uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U; - uint64_t *nq2 = p01_tmp12; - uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U; - uint64_t - bit = - (uint64_t)(key[((uint32_t)253U - i) - / (uint32_t)8U] - >> ((uint32_t)253U - i) % (uint32_t)8U - & (uint8_t)1U); - uint64_t sw = swap1[0U] ^ bit; - cswap20(sw, nq2, nq_p12); - point_add_and_double(init, p01_tmp12, tmp2); - swap1[0U] = bit; - } - } - sw0 = swap[0U]; - cswap20(sw0, nq10, nq_p11); - nq1 = p01_tmp1; - tmp1 = p01_tmp1 + (uint32_t)16U; - point_double(nq1, tmp1, tmp2); - point_double(nq1, tmp1, tmp2); - point_double(nq1, tmp1, tmp2); - memcpy(out, p0, (uint32_t)8U * sizeof (uint64_t)); -} - -static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n) -{ - uint32_t i; - fsqr0(o, inp, tmp); - for (i = (uint32_t)0U; i < n - (uint32_t)1U; i++) - { - fsqr0(o, o, tmp); - } -} - -static void finv(uint64_t *o, uint64_t *i, uint64_t *tmp) -{ - uint64_t t1[16U] = { 0U }; - uint64_t *a1 = t1; - uint64_t *b10 = t1 + (uint32_t)4U; - uint64_t *t010 = t1 + (uint32_t)12U; - uint64_t *tmp10 = tmp; - uint64_t *b11; - uint64_t *c10; - uint64_t *t011; - uint64_t *tmp11; - uint64_t *b1; - uint64_t *c1; - uint64_t *t01; - uint64_t *tmp1; - uint64_t *a; - uint64_t *t0; - fsquare_times(a1, i, tmp10, (uint32_t)1U); - fsquare_times(t010, a1, tmp10, (uint32_t)2U); - fmul0(b10, t010, i, tmp); - fmul0(a1, b10, a1, tmp); - fsquare_times(t010, a1, tmp10, (uint32_t)1U); - fmul0(b10, t010, b10, tmp); - fsquare_times(t010, b10, tmp10, (uint32_t)5U); - fmul0(b10, t010, b10, tmp); - b11 = t1 + (uint32_t)4U; - c10 = t1 + (uint32_t)8U; - t011 = t1 + (uint32_t)12U; - tmp11 = tmp; - fsquare_times(t011, b11, tmp11, (uint32_t)10U); - fmul0(c10, t011, b11, tmp); - fsquare_times(t011, c10, tmp11, (uint32_t)20U); - fmul0(t011, t011, c10, tmp); - fsquare_times(t011, t011, tmp11, (uint32_t)10U); - fmul0(b11, t011, b11, tmp); - fsquare_times(t011, b11, tmp11, (uint32_t)50U); - fmul0(c10, t011, b11, tmp); - b1 = t1 + (uint32_t)4U; - c1 = t1 + (uint32_t)8U; - t01 = t1 + (uint32_t)12U; - tmp1 = tmp; - fsquare_times(t01, c1, tmp1, (uint32_t)100U); - fmul0(t01, t01, c1, tmp); - fsquare_times(t01, t01, tmp1, 
(uint32_t)50U); - fmul0(t01, t01, b1, tmp); - fsquare_times(t01, t01, tmp1, (uint32_t)5U); - a = t1; - t0 = t1 + (uint32_t)12U; - fmul0(o, t0, a, tmp); -} - -static void store_felem(uint64_t *b, uint64_t *f) -{ - uint64_t f30 = f[3U]; - uint64_t top_bit0 = f30 >> (uint32_t)63U; - uint64_t f31; - uint64_t top_bit; - uint64_t f0; - uint64_t f1; - uint64_t f2; - uint64_t f3; - uint64_t m0; - uint64_t m1; - uint64_t m2; - uint64_t m3; - uint64_t mask; - uint64_t f0_; - uint64_t f1_; - uint64_t f2_; - uint64_t f3_; - uint64_t o0; - uint64_t o1; - uint64_t o2; - uint64_t o3; - f[3U] = f30 & (uint64_t)0x7fffffffffffffffU; - add_scalar0(f, f, (uint64_t)19U * top_bit0); - f31 = f[3U]; - top_bit = f31 >> (uint32_t)63U; - f[3U] = f31 & (uint64_t)0x7fffffffffffffffU; - add_scalar0(f, f, (uint64_t)19U * top_bit); - f0 = f[0U]; - f1 = f[1U]; - f2 = f[2U]; - f3 = f[3U]; - m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU); - m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU); - m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU); - m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU); - mask = ((m0 & m1) & m2) & m3; - f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU); - f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU); - f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU); - f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU); - o0 = f0_; - o1 = f1_; - o2 = f2_; - o3 = f3_; - b[0U] = o0; - b[1U] = o1; - b[2U] = o2; - b[3U] = o3; -} - -static void encode_point(uint8_t *o, uint64_t *i) -{ - uint64_t *x = i; - uint64_t *z = i + (uint32_t)4U; - uint64_t tmp[4U] = { 0U }; - uint64_t u64s[4U] = { 0U }; - uint64_t tmp_w[16U] = { 0U }; - finv(tmp, z, tmp_w); - fmul0(tmp, tmp, x, tmp_w); - store_felem(u64s, tmp); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(o + i0 * (uint32_t)8U, u64s[i0]);); -} - -void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub) -{ - uint64_t init[8U] = { 0U }; - uint64_t tmp[4U] = { 0U }; - uint64_t tmp3; - uint64_t *x; - uint64_t *z; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = tmp; - uint8_t *bj = pub + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x0 = r; - os[i] = x0;); - tmp3 = tmp[3U]; - tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU; - x = init; - z = init + (uint32_t)4U; - z[0U] = (uint64_t)1U; - z[1U] = (uint64_t)0U; - z[2U] = (uint64_t)0U; - z[3U] = (uint64_t)0U; - x[0U] = tmp[0U]; - x[1U] = tmp[1U]; - x[2U] = tmp[2U]; - x[3U] = tmp[3U]; - montgomery_ladder(init, priv, init); - encode_point(out, init); -} - -void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv) -{ - uint8_t basepoint[32U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t *os = basepoint; - uint8_t x = g25519[i]; - os[i] = x; - } - } - Hacl_Curve25519_64_scalarmult(pub, priv, basepoint); -} - -bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub) -{ - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(out, priv, pub); - { - uint8_t res = (uint8_t)255U; - uint8_t z; - bool r; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]); - res = uu____0 & res; - } - } - z = res; - r = z == (uint8_t)255U; - return !r; - } -} - diff --git a/dist/c89-compatible/Hacl_Curve25519_64.h b/dist/c89-compatible/Hacl_Curve25519_64.h deleted file mode 100644 index a989df5d6e..0000000000 --- 
a/dist/c89-compatible/Hacl_Curve25519_64.h +++ /dev/null @@ -1,51 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Curve25519_64_H -#define __Hacl_Curve25519_64_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub); - -void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv); - -bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Curve25519_64_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Curve25519_64_Slow.c b/dist/c89-compatible/Hacl_Curve25519_64_Slow.c deleted file mode 100644 index e55fb71f05..0000000000 --- a/dist/c89-compatible/Hacl_Curve25519_64_Slow.c +++ /dev/null @@ -1,793 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
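The three Curve25519 implementations removed in this diff share this exact three-function API and differ only in the field arithmetic underneath: Hacl_Curve25519_51 is portable 64-bit code in radix 2^51 (relying on a 128-bit integer type), Hacl_Curve25519_64 is radix 2^64 backed by Vale assembly, and Hacl_Curve25519_64_Slow, whose implementation follows, is the portable radix-2^64 fallback built on the Hacl_Bignum primitives. A compile-time dispatch a caller might have written (a sketch: `HACL_CAN_COMPILE_VALE` comes from the target configuration, the wrapper name is illustrative, and real integrations such as EverCrypt additionally gate the 64-bit path on runtime CPU feature checks):

    #include <stdbool.h>
    #include <stdint.h>
    #include "evercrypt_targetconfig.h"
    #include "Hacl_Curve25519_51.h"
    #if HACL_CAN_COMPILE_VALE
    #include "Hacl_Curve25519_64.h"
    #endif

    /* Prefer the assembly-backed ladder when the toolchain can build Vale
       code, fall back to the portable radix-2^51 ladder otherwise. */
    static bool x25519_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
    {
    #if HACL_CAN_COMPILE_VALE
      return Hacl_Curve25519_64_ecdh(out, priv, pub);
    #else
      return Hacl_Curve25519_51_ecdh(out, priv, pub);
    #endif
    }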
- */ - - -#include "Hacl_Curve25519_64_Slow.h" - - - -static inline void add1_(uint64_t *out, uint64_t *f1, uint64_t f2) -{ - uint64_t c0 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, f1[0U], f2, out); - uint64_t *a1 = f1 + (uint32_t)1U; - uint64_t *res1 = out + (uint32_t)1U; - uint64_t c2 = c0; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t1, (uint64_t)0U, res_i);); - c1 = c2; - c = c1; -} - -static inline void fadd_(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - uint64_t c01; - uint64_t *a1; - uint64_t *res1; - { - uint64_t t1 = f1[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = f2[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = out + (uint32_t)4U * (uint32_t)0U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = f2[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = f2[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = f2[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t12, t2, res_i); - } - } - } - } - c0 = c2; - c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, out[0U], c0 * (uint64_t)38U, out); - a1 = out + (uint32_t)1U; - res1 = out + (uint32_t)1U; - { - uint64_t c3 = c01; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, (uint64_t)0U, res_i);); - c1 = c3; - c = c1; - out[0U] = out[0U] + c * (uint64_t)38U; - } -} - -static inline void fsub_(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - uint64_t c01; - uint64_t *a1; - uint64_t *res1; - { - uint64_t t1 = f1[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = f2[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = out + (uint32_t)4U * (uint32_t)0U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = f2[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = f2[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = f2[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t12, t2, res_i); - } - } - } - } - c0 = c2; - c01 = 
Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, out[0U], c0 * (uint64_t)38U, out); - a1 = out + (uint32_t)1U; - res1 = out + (uint32_t)1U; - { - uint64_t c3 = c01; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t1, (uint64_t)0U, res_i);); - c1 = c3; - c = c1; - out[0U] = out[0U] - c * (uint64_t)38U; - } -} - -static inline void fmul_(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tmp) -{ - uint64_t *tmp0 = tmp; - uint64_t *uu____0; - uint64_t *uu____1; - uint64_t *res_j0; - memset(tmp0, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t bj = f2[i0]; - uint64_t *res_j = tmp0 + i0; - uint64_t c = (uint64_t)0U; - { - uint64_t a_i = f1[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); - { - uint64_t a_i0 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); - { - uint64_t a_i1 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); - { - uint64_t a_i2 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); - } - } - } - } - uint64_t r = c; - tmp0[(uint32_t)4U + i0] = r;); - uu____0 = tmp0 + (uint32_t)4U; - uu____1 = tmp0; - res_j0 = uu____1; - { - uint64_t c2 = (uint64_t)0U; - uint64_t r; - uint64_t c0; - uint64_t *uu____2; - uint64_t c01; - uint64_t *a1; - uint64_t *res1; - { - uint64_t a_i = uu____0[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, (uint64_t)38U, c2, res_i0); - { - uint64_t a_i0 = uu____0[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, (uint64_t)38U, c2, res_i1); - { - uint64_t a_i1 = uu____0[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, (uint64_t)38U, c2, res_i2); - { - uint64_t a_i2 = uu____0[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, (uint64_t)38U, c2, res_i); - } - } - } - } - r = c2; - c0 = r; - uu____2 = tmp0; - c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, uu____2[0U], c0 * (uint64_t)38U, out); - a1 = uu____2 + (uint32_t)1U; - res1 = out + (uint32_t)1U; - { - uint64_t c3 = c01; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, (uint64_t)0U, res_i);); - c1 = c3; - c = c1; - out[0U] = out[0U] + c * (uint64_t)38U; - } - } -} - -static inline void fmul2_(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tmp) -{ - uint64_t *out1 = out; - uint64_t *out2 = out + (uint32_t)4U; - uint64_t *f11 = f1; - uint64_t *f12 = f1 + (uint32_t)4U; - uint64_t *f21 = f2; - 
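A note on the constant 38 recurring in fadd_, fsub_, fmul_, and fmul1_ here: these 4-limb routines work modulo p = 2^255 - 19, and any carry or product limb of weight 2^256 is folded back into the low limb multiplied by 38, since

    2^256 = 2 * 2^255 = 2 * (p + 19) ≡ 2 * 19 = 38 (mod p).

The trailing `out[0U] = out[0U] + c * (uint64_t)38U` (or the subtractive variant in fsub_) applies the same identity to the final carry.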
uint64_t *f22 = f2 + (uint32_t)4U; - fmul_(out1, f11, f21, tmp); - fmul_(out2, f12, f22, tmp); -} - -static inline void fmul1_(uint64_t *out, uint64_t *f1, uint64_t f2) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - uint64_t c01; - uint64_t *a1; - uint64_t *res1; - { - uint64_t a_i = f1[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = out + (uint32_t)4U * (uint32_t)0U; - c2 = Hacl_Bignum_Base_mul_wide_add_u64(a_i, f2, c2, res_i0); - { - uint64_t a_i0 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Hacl_Bignum_Base_mul_wide_add_u64(a_i0, f2, c2, res_i1); - { - uint64_t a_i1 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Hacl_Bignum_Base_mul_wide_add_u64(a_i1, f2, c2, res_i2); - { - uint64_t a_i2 = f1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = out + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Hacl_Bignum_Base_mul_wide_add_u64(a_i2, f2, c2, res_i); - } - } - } - } - c0 = c2; - c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, out[0U], c0 * (uint64_t)38U, out); - a1 = out + (uint32_t)1U; - res1 = out + (uint32_t)1U; - { - uint64_t c3 = c01; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, (uint64_t)0U, res_i);); - c1 = c3; - c = c1; - out[0U] = out[0U] + c * (uint64_t)38U; - } -} - -static inline void fsqr_(uint64_t *out, uint64_t *f1, uint64_t *tmp) -{ - uint64_t c00; - memset(tmp, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ab = f1; - uint64_t a_j = f1[i0]; - uint64_t *res_j = tmp + i0; - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint64_t a_i = ab[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0); - { - uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1); - { - uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2); - { - uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint64_t a_i = ab[i]; - uint64_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i); - } - } - uint64_t r = c; - tmp[i0 + i0] = r;); - c00 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, tmp, tmp, tmp); - { - uint64_t tmp1[8U] = { 0U }; - uint64_t c10; - uint64_t *uu____0; - uint64_t *uu____1; - uint64_t *res_j; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(f1[i], f1[i]); - uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U)); - uint64_t lo = FStar_UInt128_uint128_to_uint64(res); - tmp1[(uint32_t)2U * i] = lo; - tmp1[(uint32_t)2U * i + (uint32_t)1U] = hi;); - c10 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, tmp, tmp1, tmp); - 
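At this point in fsqr_ the full 8-limb square has just been assembled: the triangular loop accumulated the off-diagonal products f1[i] * f1[j] for j < i, the first bn_add_eq_len_u64 doubled them, and the second added the diagonal squares collected in tmp1, following

    (a0 + a1*2^64 + a2*2^128 + a3*2^192)^2
      = sum_i ai^2 * 2^(128*i)  +  2 * sum_{j<i} ai*aj * 2^(64*(i+j)).

The code that follows folds the upper four limbs back into the lower four with the same multiply-by-38 reduction used in fmul_.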
uu____0 = tmp + (uint32_t)4U; - uu____1 = tmp; - res_j = uu____1; - { - uint64_t c2 = (uint64_t)0U; - uint64_t r; - uint64_t c0; - uint64_t *uu____2; - uint64_t c01; - uint64_t *a1; - uint64_t *res1; - { - uint64_t a_i = uu____0[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, (uint64_t)38U, c2, res_i0); - { - uint64_t a_i0 = uu____0[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, (uint64_t)38U, c2, res_i1); - { - uint64_t a_i1 = uu____0[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, (uint64_t)38U, c2, res_i2); - { - uint64_t a_i2 = uu____0[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, (uint64_t)38U, c2, res_i); - } - } - } - } - r = c2; - c0 = r; - uu____2 = tmp; - c01 = - Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, - uu____2[0U], - c0 * (uint64_t)38U, - out); - a1 = uu____2 + (uint32_t)1U; - res1 = out + (uint32_t)1U; - { - uint64_t c3 = c01; - uint64_t c1; - uint64_t c; - KRML_MAYBE_FOR3(i, - (uint32_t)0U, - (uint32_t)3U, - (uint32_t)1U, - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, (uint64_t)0U, res_i);); - c1 = c3; - c = c1; - out[0U] = out[0U] + c * (uint64_t)38U; - } - } - } -} - -static inline void fsqr2_(uint64_t *out, uint64_t *f, uint64_t *tmp) -{ - uint64_t *out1 = out; - uint64_t *out2 = out + (uint32_t)4U; - uint64_t *f1 = f; - uint64_t *f2 = f + (uint32_t)4U; - fmul_(out1, f1, f1, tmp); - fmul_(out2, f2, f2, tmp); -} - -static inline void cswap2_(uint64_t bit, uint64_t *p1, uint64_t *p2) -{ - uint64_t mask = (uint64_t)0U - bit; - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - uint64_t dummy = mask & (p1[i] ^ p2[i]); - p1[i] = p1[i] ^ dummy; - p2[i] = p2[i] ^ dummy; - } -} - -static const uint8_t g25519[32U] = { (uint8_t)9U }; - -static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2) -{ - uint64_t *nq = p01_tmp1; - uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U; - uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U; - uint64_t *x1 = q; - uint64_t *x2 = nq; - uint64_t *z2 = nq + (uint32_t)4U; - uint64_t *z3 = nq_p1 + (uint32_t)4U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + (uint32_t)4U; - uint64_t *ab = tmp1; - uint64_t *dc = tmp1 + (uint32_t)8U; - uint64_t *x3; - uint64_t *z31; - uint64_t *d0; - uint64_t *c0; - uint64_t *a1; - uint64_t *b1; - uint64_t *d; - uint64_t *c; - uint64_t *ab1; - uint64_t *dc1; - fadd_(a, x2, z2); - fsub_(b, x2, z2); - x3 = nq_p1; - z31 = nq_p1 + (uint32_t)4U; - d0 = dc; - c0 = dc + (uint32_t)4U; - fadd_(c0, x3, z31); - fsub_(d0, x3, z31); - fmul2_(dc, dc, ab, tmp2); - fadd_(x3, d0, c0); - fsub_(z31, d0, c0); - a1 = tmp1; - b1 = tmp1 + (uint32_t)4U; - d = tmp1 + (uint32_t)8U; - c = tmp1 + (uint32_t)12U; - ab1 = tmp1; - dc1 = tmp1 + (uint32_t)8U; - fsqr2_(dc1, ab1, tmp2); - fsqr2_(nq_p1, nq_p1, tmp2); - a1[0U] = c[0U]; - a1[1U] = c[1U]; - a1[2U] = c[2U]; - a1[3U] = c[3U]; - fsub_(c, d, c); - fmul1_(b1, c, (uint64_t)121665U); - fadd_(b1, b1, d); - fmul2_(nq, dc1, ab1, tmp2); - fmul_(z3, z3, x1, tmp2); -} - -static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2) -{ - uint64_t *x2 = nq; - uint64_t *z2 = nq 
+ (uint32_t)4U; - uint64_t *a = tmp1; - uint64_t *b = tmp1 + (uint32_t)4U; - uint64_t *d = tmp1 + (uint32_t)8U; - uint64_t *c = tmp1 + (uint32_t)12U; - uint64_t *ab = tmp1; - uint64_t *dc = tmp1 + (uint32_t)8U; - fadd_(a, x2, z2); - fsub_(b, x2, z2); - fsqr2_(dc, ab, tmp2); - a[0U] = c[0U]; - a[1U] = c[1U]; - a[2U] = c[2U]; - a[3U] = c[3U]; - fsub_(c, d, c); - fmul1_(b, c, (uint64_t)121665U); - fadd_(b, b, d); - fmul2_(nq, dc, ab, tmp2); -} - -static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init) -{ - uint64_t tmp2[16U] = { 0U }; - uint64_t p01_tmp1_swap[33U] = { 0U }; - uint64_t *p0 = p01_tmp1_swap; - uint64_t *p01 = p01_tmp1_swap; - uint64_t *p03 = p01; - uint64_t *p11 = p01 + (uint32_t)8U; - uint64_t *x0; - uint64_t *z0; - uint64_t *p01_tmp1; - uint64_t *p01_tmp11; - uint64_t *nq10; - uint64_t *nq_p11; - uint64_t *swap; - uint64_t sw0; - uint64_t *nq1; - uint64_t *tmp1; - memcpy(p11, init, (uint32_t)8U * sizeof (uint64_t)); - x0 = p03; - z0 = p03 + (uint32_t)4U; - x0[0U] = (uint64_t)1U; - x0[1U] = (uint64_t)0U; - x0[2U] = (uint64_t)0U; - x0[3U] = (uint64_t)0U; - z0[0U] = (uint64_t)0U; - z0[1U] = (uint64_t)0U; - z0[2U] = (uint64_t)0U; - z0[3U] = (uint64_t)0U; - p01_tmp1 = p01_tmp1_swap; - p01_tmp11 = p01_tmp1_swap; - nq10 = p01_tmp1_swap; - nq_p11 = p01_tmp1_swap + (uint32_t)8U; - swap = p01_tmp1_swap + (uint32_t)32U; - cswap2_((uint64_t)1U, nq10, nq_p11); - point_add_and_double(init, p01_tmp11, tmp2); - swap[0U] = (uint64_t)1U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)251U; i++) - { - uint64_t *p01_tmp12 = p01_tmp1_swap; - uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U; - uint64_t *nq2 = p01_tmp12; - uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U; - uint64_t - bit = - (uint64_t)(key[((uint32_t)253U - i) - / (uint32_t)8U] - >> ((uint32_t)253U - i) % (uint32_t)8U - & (uint8_t)1U); - uint64_t sw = swap1[0U] ^ bit; - cswap2_(sw, nq2, nq_p12); - point_add_and_double(init, p01_tmp12, tmp2); - swap1[0U] = bit; - } - } - sw0 = swap[0U]; - cswap2_(sw0, nq10, nq_p11); - nq1 = p01_tmp1; - tmp1 = p01_tmp1 + (uint32_t)16U; - point_double(nq1, tmp1, tmp2); - point_double(nq1, tmp1, tmp2); - point_double(nq1, tmp1, tmp2); - memcpy(out, p0, (uint32_t)8U * sizeof (uint64_t)); -} - -static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n) -{ - uint32_t i; - fsqr_(o, inp, tmp); - for (i = (uint32_t)0U; i < n - (uint32_t)1U; i++) - { - fsqr_(o, o, tmp); - } -} - -static void finv(uint64_t *o, uint64_t *i, uint64_t *tmp) -{ - uint64_t t1[16U] = { 0U }; - uint64_t *a1 = t1; - uint64_t *b10 = t1 + (uint32_t)4U; - uint64_t *t010 = t1 + (uint32_t)12U; - uint64_t *tmp10 = tmp; - uint64_t *b11; - uint64_t *c10; - uint64_t *t011; - uint64_t *tmp11; - uint64_t *b1; - uint64_t *c1; - uint64_t *t01; - uint64_t *tmp1; - uint64_t *a; - uint64_t *t0; - fsquare_times(a1, i, tmp10, (uint32_t)1U); - fsquare_times(t010, a1, tmp10, (uint32_t)2U); - fmul_(b10, t010, i, tmp); - fmul_(a1, b10, a1, tmp); - fsquare_times(t010, a1, tmp10, (uint32_t)1U); - fmul_(b10, t010, b10, tmp); - fsquare_times(t010, b10, tmp10, (uint32_t)5U); - fmul_(b10, t010, b10, tmp); - b11 = t1 + (uint32_t)4U; - c10 = t1 + (uint32_t)8U; - t011 = t1 + (uint32_t)12U; - tmp11 = tmp; - fsquare_times(t011, b11, tmp11, (uint32_t)10U); - fmul_(c10, t011, b11, tmp); - fsquare_times(t011, c10, tmp11, (uint32_t)20U); - fmul_(t011, t011, c10, tmp); - fsquare_times(t011, t011, tmp11, (uint32_t)10U); - fmul_(b11, t011, b11, tmp); - fsquare_times(t011, b11, tmp11, (uint32_t)50U); - fmul_(c10, t011, b11, tmp); 
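By this step in finv, the chain of fsquare_times/fmul_ calls has built i^(2^100 - 1) in c10; the remaining squarings (100, 50, and finally 5) and multiplications reach

    i^(2^255 - 21) = i^(p - 2),  p = 2^255 - 19,

which is the inverse of i by Fermat's little theorem. The whole chain costs 254 field squarings and 11 multiplications, and the identical addition chain appears in Hacl_Curve25519_51_finv and the Curve25519_64 finv earlier in this diff.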
- b1 = t1 + (uint32_t)4U; - c1 = t1 + (uint32_t)8U; - t01 = t1 + (uint32_t)12U; - tmp1 = tmp; - fsquare_times(t01, c1, tmp1, (uint32_t)100U); - fmul_(t01, t01, c1, tmp); - fsquare_times(t01, t01, tmp1, (uint32_t)50U); - fmul_(t01, t01, b1, tmp); - fsquare_times(t01, t01, tmp1, (uint32_t)5U); - a = t1; - t0 = t1 + (uint32_t)12U; - fmul_(o, t0, a, tmp); -} - -static void store_felem(uint64_t *b, uint64_t *f) -{ - uint64_t f30 = f[3U]; - uint64_t top_bit0 = f30 >> (uint32_t)63U; - uint64_t f31; - uint64_t top_bit; - uint64_t f0; - uint64_t f1; - uint64_t f2; - uint64_t f3; - uint64_t m0; - uint64_t m1; - uint64_t m2; - uint64_t m3; - uint64_t mask; - uint64_t f0_; - uint64_t f1_; - uint64_t f2_; - uint64_t f3_; - uint64_t o0; - uint64_t o1; - uint64_t o2; - uint64_t o3; - f[3U] = f30 & (uint64_t)0x7fffffffffffffffU; - add1_(f, f, (uint64_t)19U * top_bit0); - f31 = f[3U]; - top_bit = f31 >> (uint32_t)63U; - f[3U] = f31 & (uint64_t)0x7fffffffffffffffU; - add1_(f, f, (uint64_t)19U * top_bit); - f0 = f[0U]; - f1 = f[1U]; - f2 = f[2U]; - f3 = f[3U]; - m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU); - m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU); - m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU); - m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU); - mask = ((m0 & m1) & m2) & m3; - f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU); - f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU); - f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU); - f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU); - o0 = f0_; - o1 = f1_; - o2 = f2_; - o3 = f3_; - b[0U] = o0; - b[1U] = o1; - b[2U] = o2; - b[3U] = o3; -} - -static void encode_point(uint8_t *o, uint64_t *i) -{ - uint64_t *x = i; - uint64_t *z = i + (uint32_t)4U; - uint64_t tmp[4U] = { 0U }; - uint64_t u64s[4U] = { 0U }; - uint64_t tmp_w[16U] = { 0U }; - finv(tmp, z, tmp_w); - fmul_(tmp, tmp, x, tmp_w); - store_felem(u64s, tmp); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(o + i0 * (uint32_t)8U, u64s[i0]);); -} - -void Hacl_Curve25519_64_Slow_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub) -{ - uint64_t init[8U] = { 0U }; - uint64_t tmp[4U] = { 0U }; - uint64_t tmp3; - uint64_t *x; - uint64_t *z; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = tmp; - uint8_t *bj = pub + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x0 = r; - os[i] = x0;); - tmp3 = tmp[3U]; - tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU; - x = init; - z = init + (uint32_t)4U; - z[0U] = (uint64_t)1U; - z[1U] = (uint64_t)0U; - z[2U] = (uint64_t)0U; - z[3U] = (uint64_t)0U; - x[0U] = tmp[0U]; - x[1U] = tmp[1U]; - x[2U] = tmp[2U]; - x[3U] = tmp[3U]; - montgomery_ladder(init, priv, init); - encode_point(out, init); -} - -void Hacl_Curve25519_64_Slow_secret_to_public(uint8_t *pub, uint8_t *priv) -{ - uint8_t basepoint[32U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t *os = basepoint; - uint8_t x = g25519[i]; - os[i] = x; - } - } - Hacl_Curve25519_64_Slow_scalarmult(pub, priv, basepoint); -} - -bool Hacl_Curve25519_64_Slow_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub) -{ - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_Slow_scalarmult(out, priv, pub); - { - uint8_t res = (uint8_t)255U; - uint8_t z; - bool r; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]); - res = uu____0 & res; - } - } - z = res; - r = z == 
(uint8_t)255U; - return !r; - } -} - diff --git a/dist/c89-compatible/Hacl_Curve25519_64_Slow.h b/dist/c89-compatible/Hacl_Curve25519_64_Slow.h deleted file mode 100644 index 0e5a909adb..0000000000 --- a/dist/c89-compatible/Hacl_Curve25519_64_Slow.h +++ /dev/null @@ -1,52 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Curve25519_64_Slow_H -#define __Hacl_Curve25519_64_Slow_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Bignum_Base.h" -#include "evercrypt_targetconfig.h" -void Hacl_Curve25519_64_Slow_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub); - -void Hacl_Curve25519_64_Slow_secret_to_public(uint8_t *pub, uint8_t *priv); - -bool Hacl_Curve25519_64_Slow_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Curve25519_64_Slow_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_EC_Ed25519.c b/dist/c89-compatible/Hacl_EC_Ed25519.c deleted file mode 100644 index ed5165bdfa..0000000000 --- a/dist/c89-compatible/Hacl_EC_Ed25519.c +++ /dev/null @@ -1,348 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
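One last remark on the Curve25519 files above before the Ed25519 code: all three variants use the same Montgomery-ladder shape. After an initial unconditional swap and add-and-double (bit 254, which clamping forces to 1), the main loop consumes the secret scalar from bit 253 down to bit 3 as

    bit_i = (key[(253 - i) / 8] >> ((253 - i) % 8)) & 1,   i = 0 .. 250,

swapping the two working points only when the bit changes (sw = previous ^ current); the three trailing point_double calls account for the cleared low bits of a clamped scalar, i.e., multiplication by the cofactor 8. The swap itself is branch-free; a standalone sketch of the pattern behind the cswap2 variants (limb count shortened to 4 for illustration):

    #include <stdint.h>

    /* Swap p1 and p2 when bit == 1, leave them untouched when bit == 0,
       with no secret-dependent branch. */
    static void ct_cswap4(uint64_t bit, uint64_t *p1, uint64_t *p2)
    {
      uint64_t mask = (uint64_t)0U - bit; /* all ones iff bit == 1 */
      uint32_t i;
      for (i = (uint32_t)0U; i < (uint32_t)4U; i++)
      {
        uint64_t dummy = mask & (p1[i] ^ p2[i]);
        p1[i] = p1[i] ^ dummy;
        p2[i] = p2[i] ^ dummy;
      }
    }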
- */ - - -#include "Hacl_EC_Ed25519.h" - -#include "internal/Hacl_Ed25519.h" - -/******************************************************************************* - Verified field arithmetic modulo p = 2^255 - 19. - - This is a 64-bit optimized version, where a field element in radix-2^{51} is - represented as an array of five unsigned 64-bit integers, i.e., uint64_t[5]. -*******************************************************************************/ - - -/** -Write the additive identity in `f`. - - The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5]. -*/ -void Hacl_EC_Ed25519_mk_felem_zero(uint64_t *b) -{ - b[0U] = (uint64_t)0U; - b[1U] = (uint64_t)0U; - b[2U] = (uint64_t)0U; - b[3U] = (uint64_t)0U; - b[4U] = (uint64_t)0U; -} - -/** -Write the multiplicative identity in `f`. - - The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5]. -*/ -void Hacl_EC_Ed25519_mk_felem_one(uint64_t *b) -{ - b[0U] = (uint64_t)1U; - b[1U] = (uint64_t)0U; - b[2U] = (uint64_t)0U; - b[3U] = (uint64_t)0U; - b[4U] = (uint64_t)0U; -} - -/** -Write `a + b mod p` in `out`. - - The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a`, `b`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_felem_add(uint64_t *a, uint64_t *b, uint64_t *out) -{ - Hacl_Impl_Curve25519_Field51_fadd(out, a, b); - Hacl_Bignum25519_reduce_513(out); -} - -/** -Write `a - b mod p` in `out`. - - The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a`, `b`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out) -{ - Hacl_Impl_Curve25519_Field51_fsub(out, a, b); - Hacl_Bignum25519_reduce_513(out); -} - -/** -Write `a * b mod p` in `out`. - - The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a`, `b`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out) -{ - FStar_UInt128_uint128 tmp[10U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)10U; ++_i) - tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - Hacl_Impl_Curve25519_Field51_fmul(out, a, b, tmp); -} - -/** -Write `a * a mod p` in `out`. - - The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a` and `out` are either disjoint or equal -*/ -void Hacl_EC_Ed25519_felem_sqr(uint64_t *a, uint64_t *out) -{ - FStar_UInt128_uint128 tmp[5U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)5U; ++_i) - tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp); -} - -/** -Write `a ^ (p - 2) mod p` in `out`. - - The function computes modular multiplicative inverse if `a` <> zero. - - The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. 
- • `a` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_felem_inv(uint64_t *a, uint64_t *out) -{ - Hacl_Bignum25519_inverse(out, a); - Hacl_Bignum25519_reduce_513(out); -} - -/** -Load a little-endian field element from memory. - - The argument `b` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The outparam `out` points to a field element of 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `b` and `out` are disjoint - - NOTE that the function also performs the reduction modulo 2^255. -*/ -void Hacl_EC_Ed25519_felem_load(uint8_t *b, uint64_t *out) -{ - Hacl_Bignum25519_load_51(out, b); -} - -/** -Serialize a field element into little-endian memory. - - The argument `a` points to a field element of 5 limbs in size, i.e., uint64_t[5]. - The outparam `out` points to 32 bytes of valid memory, i.e., uint8_t[32]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_felem_store(uint64_t *a, uint8_t *out) -{ - Hacl_Bignum25519_store_51(out, a); -} - -/******************************************************************************* - Verified group operations for the edwards25519 elliptic curve of the form - −x^2 + y^2 = 1 − (121665/121666) * x^2 * y^2. - - This is a 64-bit optimized version, where a group element in extended homogeneous - coordinates (X, Y, Z, T) is represented as an array of 20 unsigned 64-bit - integers, i.e., uint64_t[20]. -*******************************************************************************/ - - -/** -Write the point at infinity (additive identity) in `p`. - - The outparam `p` is meant to be 20 limbs in size, i.e., uint64_t[20]. -*/ -void Hacl_EC_Ed25519_mk_point_at_inf(uint64_t *p) -{ - Hacl_Impl_Ed25519_Ladder_make_point_inf(p); -} - -/** -Write the base point (generator) in `p`. - - The outparam `p` is meant to be 20 limbs in size, i.e., uint64_t[20]. -*/ -void Hacl_EC_Ed25519_mk_base_point(uint64_t *p) -{ - uint64_t *gx = p; - uint64_t *gy = p + (uint32_t)5U; - uint64_t *gz = p + (uint32_t)10U; - uint64_t *gt = p + (uint32_t)15U; - gx[0U] = (uint64_t)0x00062d608f25d51aU; - gx[1U] = (uint64_t)0x000412a4b4f6592aU; - gx[2U] = (uint64_t)0x00075b7171a4b31dU; - gx[3U] = (uint64_t)0x0001ff60527118feU; - gx[4U] = (uint64_t)0x000216936d3cd6e5U; - gy[0U] = (uint64_t)0x0006666666666658U; - gy[1U] = (uint64_t)0x0004ccccccccccccU; - gy[2U] = (uint64_t)0x0001999999999999U; - gy[3U] = (uint64_t)0x0003333333333333U; - gy[4U] = (uint64_t)0x0006666666666666U; - gz[0U] = (uint64_t)1U; - gz[1U] = (uint64_t)0U; - gz[2U] = (uint64_t)0U; - gz[3U] = (uint64_t)0U; - gz[4U] = (uint64_t)0U; - gt[0U] = (uint64_t)0x00068ab3a5b7dda3U; - gt[1U] = (uint64_t)0x00000eea2a5eadbbU; - gt[2U] = (uint64_t)0x0002af8df483c27eU; - gt[3U] = (uint64_t)0x000332b375274732U; - gt[4U] = (uint64_t)0x00067875f0fd78b7U; -} - -/** -Write `-p` in `out` (point negation). - - The argument `p` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_point_negate(uint64_t *p, uint64_t *out) -{ - Hacl_Impl_Ed25519_PointNegate_point_negate(p, out); -} - -/** -Write `p + q` in `out` (point addition). - - The arguments `p`, `q` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. 
- - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p`, `q`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_point_add(uint64_t *p, uint64_t *q, uint64_t *out) -{ - Hacl_Impl_Ed25519_PointAdd_point_add(out, p, q); -} - -/** -Write `p + p` in `out` (point doubling). - - The argument `p` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_point_double(uint64_t *p, uint64_t *out) -{ - Hacl_Impl_Ed25519_PointDouble_point_double(out, p); -} - -/** -Write `[scalar]p` in `out` (point multiplication or scalar multiplication). - - The argument `p` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. - The argument `scalar` is meant to be 32 bytes in size, i.e., uint8_t[32]. - - The function first loads a little-endian scalar element from `scalar` and - then computes a point multiplication. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `scalar`, `p`, and `out` are pairwise disjoint -*/ -void Hacl_EC_Ed25519_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out) -{ - Hacl_Impl_Ed25519_Ladder_point_mul(out, scalar, p); -} - -/** -Checks whether `p` is equal to `q` (point equality). - - The function returns `true` if `p` is equal to `q` and `false` otherwise. - - The arguments `p` and `q` are meant to be 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `q` are either disjoint or equal -*/ -bool Hacl_EC_Ed25519_point_eq(uint64_t *p, uint64_t *q) -{ - return Hacl_Impl_Ed25519_PointEqual_point_equal(p, q); -} - -/** -Compress a point in extended homogeneous coordinates to its compressed form. - - The argument `p` points to a point of 20 limbs in size, i.e., uint64_t[20]. - The outparam `out` points to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function first converts a given point `p` from extended homogeneous to affine coordinates - and then writes [ 2^255 * (`x` % 2) + `y` ] in `out`. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_point_compress(uint64_t *p, uint8_t *out) -{ - Hacl_Impl_Ed25519_PointCompress_point_compress(out, p); -} - -/** -Decompress a point in extended homogeneous coordinates from its compressed form. - - The function returns `true` for successful decompression of a compressed point - and `false` otherwise. - - The argument `s` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The outparam `out` points to a point of 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. 
- • `s` and `out` are disjoint
-*/
-bool Hacl_EC_Ed25519_point_decompress(uint8_t *s, uint64_t *out)
-{
- return Hacl_Impl_Ed25519_PointDecompress_point_decompress(out, s);
-}
-
diff --git a/dist/c89-compatible/Hacl_EC_Ed25519.h b/dist/c89-compatible/Hacl_EC_Ed25519.h
deleted file mode 100644
index 7a9673a99d..0000000000
--- a/dist/c89-compatible/Hacl_EC_Ed25519.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_EC_Ed25519_H
-#define __Hacl_EC_Ed25519_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "Hacl_Bignum25519_51.h"
-#include "evercrypt_targetconfig.h"
-/*******************************************************************************
- Verified field arithmetic modulo p = 2^255 - 19.
-
- This is a 64-bit optimized version, where a field element in radix-2^{51} is
- represented as an array of five unsigned 64-bit integers, i.e., uint64_t[5].
-*******************************************************************************/
-
-
-/**
-Write the additive identity in `f`.
-
- The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5].
-*/
-void Hacl_EC_Ed25519_mk_felem_zero(uint64_t *b);
-
-/**
-Write the multiplicative identity in `f`.
-
- The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5].
-*/
-void Hacl_EC_Ed25519_mk_felem_one(uint64_t *b);
-
-/**
-Write `a + b mod p` in `out`.
-
- The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a`, `b`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_Ed25519_felem_add(uint64_t *a, uint64_t *b, uint64_t *out);
-
-/**
-Write `a - b mod p` in `out`.
-
- The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a`, `b`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_Ed25519_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out);
-
-/**
-Write `a * b mod p` in `out`.
- - The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a`, `b`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out); - -/** -Write `a * a mod p` in `out`. - - The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a` and `out` are either disjoint or equal -*/ -void Hacl_EC_Ed25519_felem_sqr(uint64_t *a, uint64_t *out); - -/** -Write `a ^ (p - 2) mod p` in `out`. - - The function computes modular multiplicative inverse if `a` <> zero. - - The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_felem_inv(uint64_t *a, uint64_t *out); - -/** -Load a little-endian field element from memory. - - The argument `b` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The outparam `out` points to a field element of 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `b` and `out` are disjoint - - NOTE that the function also performs the reduction modulo 2^255. -*/ -void Hacl_EC_Ed25519_felem_load(uint8_t *b, uint64_t *out); - -/** -Serialize a field element into little-endian memory. - - The argument `a` points to a field element of 5 limbs in size, i.e., uint64_t[5]. - The outparam `out` points to 32 bytes of valid memory, i.e., uint8_t[32]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_felem_store(uint64_t *a, uint8_t *out); - -/******************************************************************************* - Verified group operations for the edwards25519 elliptic curve of the form - −x^2 + y^2 = 1 − (121665/121666) * x^2 * y^2. - - This is a 64-bit optimized version, where a group element in extended homogeneous - coordinates (X, Y, Z, T) is represented as an array of 20 unsigned 64-bit - integers, i.e., uint64_t[20]. -*******************************************************************************/ - - -/** -Write the point at infinity (additive identity) in `p`. - - The outparam `p` is meant to be 20 limbs in size, i.e., uint64_t[20]. -*/ -void Hacl_EC_Ed25519_mk_point_at_inf(uint64_t *p); - -/** -Write the base point (generator) in `p`. - - The outparam `p` is meant to be 20 limbs in size, i.e., uint64_t[20]. -*/ -void Hacl_EC_Ed25519_mk_base_point(uint64_t *p); - -/** -Write `-p` in `out` (point negation). - - The argument `p` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_point_negate(uint64_t *p, uint64_t *out); - -/** -Write `p + q` in `out` (point addition). - - The arguments `p`, `q` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. 
- • `p`, `q`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_point_add(uint64_t *p, uint64_t *q, uint64_t *out); - -/** -Write `p + p` in `out` (point doubling). - - The argument `p` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_Ed25519_point_double(uint64_t *p, uint64_t *out); - -/** -Write `[scalar]p` in `out` (point multiplication or scalar multiplication). - - The argument `p` and the outparam `out` are meant to be 20 limbs in size, i.e., uint64_t[20]. - The argument `scalar` is meant to be 32 bytes in size, i.e., uint8_t[32]. - - The function first loads a little-endian scalar element from `scalar` and - then computes a point multiplication. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `scalar`, `p`, and `out` are pairwise disjoint -*/ -void Hacl_EC_Ed25519_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out); - -/** -Checks whether `p` is equal to `q` (point equality). - - The function returns `true` if `p` is equal to `q` and `false` otherwise. - - The arguments `p` and `q` are meant to be 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `q` are either disjoint or equal -*/ -bool Hacl_EC_Ed25519_point_eq(uint64_t *p, uint64_t *q); - -/** -Compress a point in extended homogeneous coordinates to its compressed form. - - The argument `p` points to a point of 20 limbs in size, i.e., uint64_t[20]. - The outparam `out` points to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function first converts a given point `p` from extended homogeneous to affine coordinates - and then writes [ 2^255 * (`x` % 2) + `y` ] in `out`. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `p` and `out` are disjoint -*/ -void Hacl_EC_Ed25519_point_compress(uint64_t *p, uint8_t *out); - -/** -Decompress a point in extended homogeneous coordinates from its compressed form. - - The function returns `true` for successful decompression of a compressed point - and `false` otherwise. - - The argument `s` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The outparam `out` points to a point of 20 limbs in size, i.e., uint64_t[20]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. 
- • `s` and `out` are disjoint -*/ -bool Hacl_EC_Ed25519_point_decompress(uint8_t *s, uint64_t *out); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_EC_Ed25519_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_EC_K256.c b/dist/c89-compatible/Hacl_EC_K256.c deleted file mode 100644 index ea5ab9826f..0000000000 --- a/dist/c89-compatible/Hacl_EC_K256.c +++ /dev/null @@ -1,341 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_EC_K256.h" - -#include "internal/Hacl_K256_ECDSA.h" - -/******************************************************************************* - Verified field arithmetic modulo p = 2^256 - 0x1000003D1. - - This is a 64-bit optimized version, where a field element in radix-2^{52} is - represented as an array of five unsigned 64-bit integers, i.e., uint64_t[5]. -*******************************************************************************/ - - -/** -Write the additive identity in `f`. - - The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5]. -*/ -void Hacl_EC_K256_mk_felem_zero(uint64_t *f) -{ - memset(f, 0U, (uint32_t)5U * sizeof (uint64_t)); -} - -/** -Write the multiplicative identity in `f`. - - The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5]. -*/ -void Hacl_EC_K256_mk_felem_one(uint64_t *f) -{ - memset(f, 0U, (uint32_t)5U * sizeof (uint64_t)); - f[0U] = (uint64_t)1U; -} - -/** -Write `a + b mod p` in `out`. - - The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a`, `b`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_K256_felem_add(uint64_t *a, uint64_t *b, uint64_t *out) -{ - Hacl_K256_Field_fadd(out, a, b); - Hacl_K256_Field_fnormalize_weak(out, out); -} - -/** -Write `a - b mod p` in `out`. - - The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `a`, `b`, and `out` are either pairwise disjoint or equal -*/ -void Hacl_EC_K256_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out) -{ - Hacl_K256_Field_fsub(out, a, b, (uint64_t)2U); - Hacl_K256_Field_fnormalize_weak(out, out); -} - -/** -Write `a * b mod p` in `out`. 
-
- The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a`, `b`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_K256_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out)
-{
- Hacl_K256_Field_fmul(out, a, b);
-}
-
-/**
-Write `a * a mod p` in `out`.
-
- The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a` and `out` are either disjoint or equal
-*/
-void Hacl_EC_K256_felem_sqr(uint64_t *a, uint64_t *out)
-{
- Hacl_K256_Field_fsqr(out, a);
-}
-
-/**
-Write `a ^ (p - 2) mod p` in `out`.
-
- The function computes modular multiplicative inverse if `a` <> zero.
-
- The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a` and `out` are disjoint
-*/
-void Hacl_EC_K256_felem_inv(uint64_t *a, uint64_t *out)
-{
- Hacl_Impl_K256_Finv_finv(out, a);
-}
-
-/**
-Load a big-endian field element from memory.
-
- The argument `b` points to 32 bytes of valid memory, i.e., uint8_t[32].
- The outparam `out` points to a field element of 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `b` and `out` are disjoint
-*/
-void Hacl_EC_K256_felem_load(uint8_t *b, uint64_t *out)
-{
- Hacl_K256_Field_load_felem(out, b);
-}
-
-/**
-Serialize a field element into big-endian memory.
-
- The argument `a` points to a field element of 5 limbs in size, i.e., uint64_t[5].
- The outparam `out` points to 32 bytes of valid memory, i.e., uint8_t[32].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a` and `out` are disjoint
-*/
-void Hacl_EC_K256_felem_store(uint64_t *a, uint8_t *out)
-{
- uint64_t tmp[5U] = { 0U };
- Hacl_K256_Field_fnormalize(tmp, a);
- Hacl_K256_Field_store_felem(out, tmp);
-}
-
-/*******************************************************************************
- Verified group operations for the secp256k1 curve of the form y^2 = x^3 + 7.
-
- This is a 64-bit optimized version, where a group element in projective coordinates
- is represented as an array of 15 unsigned 64-bit integers, i.e., uint64_t[15].
-*******************************************************************************/
-
-
-/**
-Write the point at infinity (additive identity) in `p`.
-
- The outparam `p` is meant to be 15 limbs in size, i.e., uint64_t[15].
-*/
-void Hacl_EC_K256_mk_point_at_inf(uint64_t *p)
-{
- Hacl_Impl_K256_PointMul_make_point_at_inf(p);
-}
-
-/**
-Write the base point (generator) in `p`.
-
- The outparam `p` is meant to be 15 limbs in size, i.e., uint64_t[15].
-*/
-void Hacl_EC_K256_mk_base_point(uint64_t *p)
-{
- uint64_t *gx = p;
- uint64_t *gy = p + (uint32_t)5U;
- uint64_t *gz = p + (uint32_t)10U;
- gx[0U] = (uint64_t)0x2815b16f81798U;
- gx[1U] = (uint64_t)0xdb2dce28d959fU;
- gx[2U] = (uint64_t)0xe870b07029bfcU;
- gx[3U] = (uint64_t)0xbbac55a06295cU;
- gx[4U] = (uint64_t)0x79be667ef9dcU;
- gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
- gy[1U] = (uint64_t)0x48a68554199c4U;
- gy[2U] = (uint64_t)0xe1108a8fd17b4U;
- gy[3U] = (uint64_t)0xc4655da4fbfc0U;
- gy[4U] = (uint64_t)0x483ada7726a3U;
- memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
- gz[0U] = (uint64_t)1U;
-}
-
-/**
-Write `-p` in `out` (point negation).
-
- The argument `p` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `out` are either disjoint or equal
-*/
-void Hacl_EC_K256_point_negate(uint64_t *p, uint64_t *out)
-{
- Hacl_Impl_K256_Point_point_negate(out, p);
-}
-
-/**
-Write `p + q` in `out` (point addition).
-
- The arguments `p`, `q` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p`, `q`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_K256_point_add(uint64_t *p, uint64_t *q, uint64_t *out)
-{
- Hacl_Impl_K256_PointAdd_point_add(out, p, q);
-}
-
-/**
-Write `p + p` in `out` (point doubling).
-
- The argument `p` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `out` are either disjoint or equal
-*/
-void Hacl_EC_K256_point_double(uint64_t *p, uint64_t *out)
-{
- Hacl_Impl_K256_PointDouble_point_double(out, p);
-}
-
-/**
-Write `[scalar]p` in `out` (point multiplication or scalar multiplication).
-
- The argument `p` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
- The argument `scalar` is meant to be 32 bytes in size, i.e., uint8_t[32].
-
- The function first loads a big-endian scalar element from `scalar` and
- then computes a point multiplication.
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `scalar`, `p`, and `out` are pairwise disjoint
-*/
-void Hacl_EC_K256_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out)
-{
- uint64_t scalar_q[4U] = { 0U };
- KRML_MAYBE_FOR4(i,
- (uint32_t)0U,
- (uint32_t)4U,
- (uint32_t)1U,
- uint64_t *os = scalar_q;
- uint64_t u = load64_be(scalar + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
- uint64_t x = u;
- os[i] = x;);
- Hacl_Impl_K256_PointMul_point_mul(out, scalar_q, p);
-}
-
-/**
-Checks whether `p` is equal to `q` (point equality).
-
- The function returns `true` if `p` is equal to `q` and `false` otherwise.
-
- The arguments `p` and `q` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `q` are either disjoint or equal
-*/
-bool Hacl_EC_K256_point_eq(uint64_t *p, uint64_t *q)
-{
- return Hacl_Impl_K256_Point_point_eq(p, q);
-}
-
-/**
-Compress a point in projective coordinates to its compressed form.
-
- The argument `p` points to a point of 15 limbs in size, i.e., uint64_t[15].
- The outparam `out` points to 33 bytes of valid memory, i.e., uint8_t[33].
-
- The function first converts a given point `p` from projective to affine coordinates
- and then writes [ 0x02 for even `y` and 0x03 for odd `y`; `x` ] in `out`.
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `out` are disjoint
-*/
-void Hacl_EC_K256_point_compress(uint64_t *p, uint8_t *out)
-{
- uint64_t xa[5U] = { 0U };
- uint64_t ya[5U] = { 0U };
- uint64_t *x1 = p;
- uint64_t *y1 = p + (uint32_t)5U;
- uint64_t *z1 = p + (uint32_t)10U;
- uint64_t zinv[5U] = { 0U };
- Hacl_Impl_K256_Finv_finv(zinv, z1);
- Hacl_K256_Field_fmul(xa, x1, zinv);
- Hacl_K256_Field_fmul(ya, y1, zinv);
- Hacl_Impl_K256_Point_aff_point_compress_vartime(out, xa, ya);
-}
-
-/**
-Decompress a point in projective coordinates from its compressed form.
-
- The function returns `true` for successful decompression of a compressed point
- and `false` otherwise.
-
- The argument `s` points to 33 bytes of valid memory, i.e., uint8_t[33].
- The outparam `out` points to a point of 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `s` and `out` are disjoint
-*/
-bool Hacl_EC_K256_point_decompress(uint8_t *s, uint64_t *out)
-{
- uint64_t *px = out;
- uint64_t *py = out + (uint32_t)5U;
- uint64_t *pz = out + (uint32_t)10U;
- bool b = Hacl_Impl_K256_Point_aff_point_decompress_vartime(px, py, s);
- memset(pz, 0U, (uint32_t)5U * sizeof (uint64_t));
- pz[0U] = (uint64_t)1U;
- return b;
-}
-
diff --git a/dist/c89-compatible/Hacl_EC_K256.h b/dist/c89-compatible/Hacl_EC_K256.h
deleted file mode 100644
index cf5681912c..0000000000
--- a/dist/c89-compatible/Hacl_EC_K256.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_EC_K256_H
-#define __Hacl_EC_K256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "Hacl_Bignum_K256.h"
-#include "evercrypt_targetconfig.h"
-/*******************************************************************************
- Verified field arithmetic modulo p = 2^256 - 0x1000003D1.
-
- This is a 64-bit optimized version, where a field element in radix-2^{52} is
- represented as an array of five unsigned 64-bit integers, i.e., uint64_t[5].
-*******************************************************************************/
-
-
-/**
-Write the additive identity in `f`.
-
- The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5].
-*/
-void Hacl_EC_K256_mk_felem_zero(uint64_t *f);
-
-/**
-Write the multiplicative identity in `f`.
-
- The outparam `f` is meant to be 5 limbs in size, i.e., uint64_t[5].
-*/
-void Hacl_EC_K256_mk_felem_one(uint64_t *f);
-
-/**
-Write `a + b mod p` in `out`.
-
- The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a`, `b`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_K256_felem_add(uint64_t *a, uint64_t *b, uint64_t *out);
-
-/**
-Write `a - b mod p` in `out`.
-
- The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a`, `b`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_K256_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out);
-
-/**
-Write `a * b mod p` in `out`.
-
- The arguments `a`, `b`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a`, `b`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_K256_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out);
-
-/**
-Write `a * a mod p` in `out`.
-
- The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a` and `out` are either disjoint or equal
-*/
-void Hacl_EC_K256_felem_sqr(uint64_t *a, uint64_t *out);
-
-/**
-Write `a ^ (p - 2) mod p` in `out`.
-
- The function computes modular multiplicative inverse if `a` <> zero.
-
- The argument `a`, and the outparam `out` are meant to be 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a` and `out` are disjoint
-*/
-void Hacl_EC_K256_felem_inv(uint64_t *a, uint64_t *out);
-
-/**
-Load a big-endian field element from memory.
-
- The argument `b` points to 32 bytes of valid memory, i.e., uint8_t[32].
- The outparam `out` points to a field element of 5 limbs in size, i.e., uint64_t[5].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `b` and `out` are disjoint
-*/
-void Hacl_EC_K256_felem_load(uint8_t *b, uint64_t *out);
-
-/**
-Serialize a field element into big-endian memory.
-
- The argument `a` points to a field element of 5 limbs in size, i.e., uint64_t[5].
- The outparam `out` points to 32 bytes of valid memory, i.e., uint8_t[32].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `a` and `out` are disjoint
-*/
-void Hacl_EC_K256_felem_store(uint64_t *a, uint8_t *out);
-
-/*******************************************************************************
- Verified group operations for the secp256k1 curve of the form y^2 = x^3 + 7.
-
- This is a 64-bit optimized version, where a group element in projective coordinates
- is represented as an array of 15 unsigned 64-bit integers, i.e., uint64_t[15].
-*******************************************************************************/
-
-
-/**
-Write the point at infinity (additive identity) in `p`.
-
- The outparam `p` is meant to be 15 limbs in size, i.e., uint64_t[15].
-*/
-void Hacl_EC_K256_mk_point_at_inf(uint64_t *p);
-
-/**
-Write the base point (generator) in `p`.
-
- The outparam `p` is meant to be 15 limbs in size, i.e., uint64_t[15].
-*/
-void Hacl_EC_K256_mk_base_point(uint64_t *p);
-
-/**
-Write `-p` in `out` (point negation).
-
- The argument `p` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `out` are either disjoint or equal
-*/
-void Hacl_EC_K256_point_negate(uint64_t *p, uint64_t *out);
-
-/**
-Write `p + q` in `out` (point addition).
-
- The arguments `p`, `q` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p`, `q`, and `out` are either pairwise disjoint or equal
-*/
-void Hacl_EC_K256_point_add(uint64_t *p, uint64_t *q, uint64_t *out);
-
-/**
-Write `p + p` in `out` (point doubling).
-
- The argument `p` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `out` are either disjoint or equal
-*/
-void Hacl_EC_K256_point_double(uint64_t *p, uint64_t *out);
-
-/**
-Write `[scalar]p` in `out` (point multiplication or scalar multiplication).
-
- The argument `p` and the outparam `out` are meant to be 15 limbs in size, i.e., uint64_t[15].
- The argument `scalar` is meant to be 32 bytes in size, i.e., uint8_t[32].
-
- The function first loads a big-endian scalar element from `scalar` and
- then computes a point multiplication.
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `scalar`, `p`, and `out` are pairwise disjoint
-*/
-void Hacl_EC_K256_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out);
-
-/**
-Checks whether `p` is equal to `q` (point equality).
-
- The function returns `true` if `p` is equal to `q` and `false` otherwise.
-
- The arguments `p` and `q` are meant to be 15 limbs in size, i.e., uint64_t[15].
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `q` are either disjoint or equal
-*/
-bool Hacl_EC_K256_point_eq(uint64_t *p, uint64_t *q);
-
-/**
-Compress a point in projective coordinates to its compressed form.
-
- The argument `p` points to a point of 15 limbs in size, i.e., uint64_t[15].
- The outparam `out` points to 33 bytes of valid memory, i.e., uint8_t[33].
-
- The function first converts a given point `p` from projective to affine coordinates
- and then writes [ 0x02 for even `y` and 0x03 for odd `y`; `x` ] in `out`.
-
- Before calling this function, the caller will need to ensure that the following
- precondition is observed.
- • `p` and `out` are disjoint
-*/
-void Hacl_EC_K256_point_compress(uint64_t *p, uint8_t *out);
-
-/**
-Decompress a point in projective coordinates from its compressed form.
- - The function returns `true` for successful decompression of a compressed point - and `false` otherwise. - - The argument `s` points to 33 bytes of valid memory, i.e., uint8_t[33]. - The outparam `out` points to a point of 15 limbs in size, i.e., uint64_t[15]. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • `s` and `out` are disjoint -*/ -bool Hacl_EC_K256_point_decompress(uint8_t *s, uint64_t *out); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_EC_K256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Ed25519.c b/dist/c89-compatible/Hacl_Ed25519.c deleted file mode 100644 index 046e53ea5f..0000000000 --- a/dist/c89-compatible/Hacl_Ed25519.c +++ /dev/null @@ -1,2029 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "internal/Hacl_Ed25519.h" - -#include "internal/Hacl_Hash_SHA2.h" -#include "internal/Hacl_Curve25519_51.h" - -static inline void fsum(uint64_t *out, uint64_t *a, uint64_t *b) -{ - Hacl_Impl_Curve25519_Field51_fadd(out, a, b); -} - -static inline void fdifference(uint64_t *out, uint64_t *a, uint64_t *b) -{ - Hacl_Impl_Curve25519_Field51_fsub(out, a, b); -} - -void Hacl_Bignum25519_reduce_513(uint64_t *a) -{ - uint64_t f0 = a[0U]; - uint64_t f1 = a[1U]; - uint64_t f2 = a[2U]; - uint64_t f3 = a[3U]; - uint64_t f4 = a[4U]; - uint64_t l_ = f0 + (uint64_t)0U; - uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = l_ >> (uint32_t)51U; - uint64_t l_0 = f1 + c0; - uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = l_0 >> (uint32_t)51U; - uint64_t l_1 = f2 + c1; - uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = l_1 >> (uint32_t)51U; - uint64_t l_2 = f3 + c2; - uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = l_2 >> (uint32_t)51U; - uint64_t l_3 = f4 + c3; - uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = l_3 >> (uint32_t)51U; - uint64_t l_4 = tmp0 + c4 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_4 >> (uint32_t)51U; - a[0U] = tmp0_; - a[1U] = tmp1 + c5; - a[2U] = tmp2; - a[3U] = tmp3; - a[4U] = tmp4; -} - -static inline void fmul0(uint64_t *output, uint64_t *input, uint64_t *input2) -{ - FStar_UInt128_uint128 tmp[10U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)10U; ++_i) - tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - Hacl_Impl_Curve25519_Field51_fmul(output, input, input2, tmp); -} - -static inline void times_2(uint64_t *out, uint64_t *a) -{ - uint64_t a0 = a[0U]; - uint64_t a1 = a[1U]; - uint64_t a2 = a[2U]; - uint64_t a3 = a[3U]; - uint64_t a4 = a[4U]; - uint64_t o0 = (uint64_t)2U * a0; - uint64_t o1 = (uint64_t)2U * a1; - uint64_t o2 = (uint64_t)2U * a2; - uint64_t o3 = (uint64_t)2U * a3; - uint64_t o4 = (uint64_t)2U * a4; - out[0U] = o0; - out[1U] = o1; - out[2U] = o2; - out[3U] = o3; - out[4U] = o4; -} - -static inline void times_d(uint64_t *out, uint64_t *a) -{ - uint64_t d[5U] = { 0U }; - d[0U] = (uint64_t)0x00034dca135978a3U; - d[1U] = (uint64_t)0x0001a8283b156ebdU; - d[2U] = (uint64_t)0x0005e7a26001c029U; - d[3U] = (uint64_t)0x000739c663a03cbbU; - d[4U] = (uint64_t)0x00052036cee2b6ffU; - fmul0(out, d, a); -} - -static inline void times_2d(uint64_t *out, uint64_t *a) -{ - uint64_t d2[5U] = { 0U }; - d2[0U] = (uint64_t)0x00069b9426b2f159U; - d2[1U] = (uint64_t)0x00035050762add7aU; - d2[2U] = (uint64_t)0x0003cf44c0038052U; - d2[3U] = (uint64_t)0x0006738cc7407977U; - d2[4U] = (uint64_t)0x0002406d9dc56dffU; - fmul0(out, d2, a); -} - -static inline void fsquare(uint64_t *out, uint64_t *a) -{ - FStar_UInt128_uint128 tmp[5U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)5U; ++_i) - tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp); -} - -static inline void fsquare_times(uint64_t *output, uint64_t *input, uint32_t count) -{ - FStar_UInt128_uint128 tmp[5U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)5U; ++_i) - tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - Hacl_Curve25519_51_fsquare_times(output, input, tmp, count); -} - -static inline void fsquare_times_inplace(uint64_t *output, uint32_t count) -{ - FStar_UInt128_uint128 tmp[5U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)5U; ++_i) - tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); 
- } - Hacl_Curve25519_51_fsquare_times(output, output, tmp, count); -} - -void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a) -{ - FStar_UInt128_uint128 tmp[10U]; - { - uint32_t _i; - for (_i = 0U; _i < (uint32_t)10U; ++_i) - tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - Hacl_Curve25519_51_finv(out, a, tmp); -} - -static inline void reduce(uint64_t *out) -{ - uint64_t o0 = out[0U]; - uint64_t o1 = out[1U]; - uint64_t o2 = out[2U]; - uint64_t o3 = out[3U]; - uint64_t o4 = out[4U]; - uint64_t l_ = o0 + (uint64_t)0U; - uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU; - uint64_t c0 = l_ >> (uint32_t)51U; - uint64_t l_0 = o1 + c0; - uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU; - uint64_t c1 = l_0 >> (uint32_t)51U; - uint64_t l_1 = o2 + c1; - uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU; - uint64_t c2 = l_1 >> (uint32_t)51U; - uint64_t l_2 = o3 + c2; - uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU; - uint64_t c3 = l_2 >> (uint32_t)51U; - uint64_t l_3 = o4 + c3; - uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU; - uint64_t c4 = l_3 >> (uint32_t)51U; - uint64_t l_4 = tmp0 + c4 * (uint64_t)19U; - uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU; - uint64_t c5 = l_4 >> (uint32_t)51U; - uint64_t f0 = tmp0_; - uint64_t f1 = tmp1 + c5; - uint64_t f2 = tmp2; - uint64_t f3 = tmp3; - uint64_t f4 = tmp4; - uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0x7ffffffffffedU); - uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0x7ffffffffffffU); - uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0x7ffffffffffffU); - uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7ffffffffffffU); - uint64_t m4 = FStar_UInt64_eq_mask(f4, (uint64_t)0x7ffffffffffffU); - uint64_t mask = (((m0 & m1) & m2) & m3) & m4; - uint64_t f0_ = f0 - (mask & (uint64_t)0x7ffffffffffedU); - uint64_t f1_ = f1 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f2_ = f2 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f3_ = f3 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f4_ = f4 - (mask & (uint64_t)0x7ffffffffffffU); - uint64_t f01 = f0_; - uint64_t f11 = f1_; - uint64_t f21 = f2_; - uint64_t f31 = f3_; - uint64_t f41 = f4_; - out[0U] = f01; - out[1U] = f11; - out[2U] = f21; - out[3U] = f31; - out[4U] = f41; -} - -void Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input) -{ - uint64_t u64s[4U] = { 0U }; - uint64_t u64s3; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = u64s; - uint8_t *bj = input + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - u64s3 = u64s[3U]; - u64s[3U] = u64s3 & (uint64_t)0x7fffffffffffffffU; - output[0U] = u64s[0U] & (uint64_t)0x7ffffffffffffU; - output[1U] = u64s[0U] >> (uint32_t)51U | (u64s[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U; - output[2U] = u64s[1U] >> (uint32_t)38U | (u64s[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U; - output[3U] = u64s[2U] >> (uint32_t)25U | (u64s[3U] & (uint64_t)0xfffU) << (uint32_t)39U; - output[4U] = u64s[3U] >> (uint32_t)12U; -} - -void Hacl_Bignum25519_store_51(uint8_t *output, uint64_t *input) -{ - uint64_t u64s[4U] = { 0U }; - Hacl_Impl_Curve25519_Field51_store_felem(u64s, input); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(output + i * (uint32_t)8U, u64s[i]);); -} - -void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p) -{ - uint64_t tmp[20U] = { 0U }; - uint64_t *tmp10 = tmp; - uint64_t *tmp20 = tmp + (uint32_t)5U; - uint64_t *tmp30 = tmp + (uint32_t)10U; - uint64_t *tmp40 = tmp + 
(uint32_t)15U; - uint64_t *x10 = p; - uint64_t *y10 = p + (uint32_t)5U; - uint64_t *z1 = p + (uint32_t)10U; - uint64_t *tmp1; - uint64_t *tmp2; - uint64_t *tmp3; - uint64_t *tmp4; - uint64_t *x1; - uint64_t *y1; - uint64_t *tmp_f; - uint64_t *tmp_e; - uint64_t *tmp_h; - uint64_t *tmp_g; - uint64_t *x3; - uint64_t *y3; - uint64_t *z3; - uint64_t *t3; - fsquare(tmp10, x10); - fsquare(tmp20, y10); - fsum(tmp30, tmp10, tmp20); - fdifference(tmp40, tmp10, tmp20); - fsquare(tmp10, z1); - times_2(tmp10, tmp10); - tmp1 = tmp; - tmp2 = tmp + (uint32_t)5U; - tmp3 = tmp + (uint32_t)10U; - tmp4 = tmp + (uint32_t)15U; - x1 = p; - y1 = p + (uint32_t)5U; - fsum(tmp2, x1, y1); - fsquare(tmp2, tmp2); - Hacl_Bignum25519_reduce_513(tmp3); - fdifference(tmp2, tmp3, tmp2); - Hacl_Bignum25519_reduce_513(tmp1); - Hacl_Bignum25519_reduce_513(tmp4); - fsum(tmp1, tmp1, tmp4); - tmp_f = tmp; - tmp_e = tmp + (uint32_t)5U; - tmp_h = tmp + (uint32_t)10U; - tmp_g = tmp + (uint32_t)15U; - x3 = out; - y3 = out + (uint32_t)5U; - z3 = out + (uint32_t)10U; - t3 = out + (uint32_t)15U; - fmul0(x3, tmp_e, tmp_f); - fmul0(y3, tmp_g, tmp_h); - fmul0(t3, tmp_e, tmp_h); - fmul0(z3, tmp_f, tmp_g); -} - -static inline void pow2_252m2(uint64_t *out, uint64_t *z) -{ - uint64_t buf[20U] = { 0U }; - uint64_t *a0 = buf; - uint64_t *t00 = buf + (uint32_t)5U; - uint64_t *b0 = buf + (uint32_t)10U; - uint64_t *c0 = buf + (uint32_t)15U; - uint64_t *a; - uint64_t *t0; - uint64_t *b; - uint64_t *c; - fsquare_times(a0, z, (uint32_t)1U); - fsquare_times(t00, a0, (uint32_t)2U); - fmul0(b0, t00, z); - fmul0(a0, b0, a0); - fsquare_times(t00, a0, (uint32_t)1U); - fmul0(b0, t00, b0); - fsquare_times(t00, b0, (uint32_t)5U); - fmul0(b0, t00, b0); - fsquare_times(t00, b0, (uint32_t)10U); - fmul0(c0, t00, b0); - fsquare_times(t00, c0, (uint32_t)20U); - fmul0(t00, t00, c0); - fsquare_times_inplace(t00, (uint32_t)10U); - fmul0(b0, t00, b0); - fsquare_times(t00, b0, (uint32_t)50U); - a = buf; - t0 = buf + (uint32_t)5U; - b = buf + (uint32_t)10U; - c = buf + (uint32_t)15U; - fsquare_times(a, z, (uint32_t)1U); - fmul0(c, t0, b); - fsquare_times(t0, c, (uint32_t)100U); - fmul0(t0, t0, c); - fsquare_times_inplace(t0, (uint32_t)50U); - fmul0(t0, t0, b); - fsquare_times_inplace(t0, (uint32_t)2U); - fmul0(out, t0, a); -} - -static inline bool is_0(uint64_t *x) -{ - uint64_t x0 = x[0U]; - uint64_t x1 = x[1U]; - uint64_t x2 = x[2U]; - uint64_t x3 = x[3U]; - uint64_t x4 = x[4U]; - return - x0 - == (uint64_t)0U - && x1 == (uint64_t)0U - && x2 == (uint64_t)0U - && x3 == (uint64_t)0U - && x4 == (uint64_t)0U; -} - -static inline void mul_modp_sqrt_m1(uint64_t *x) -{ - uint64_t sqrt_m1[5U] = { 0U }; - sqrt_m1[0U] = (uint64_t)0x00061b274a0ea0b0U; - sqrt_m1[1U] = (uint64_t)0x0000d5a5fc8f189dU; - sqrt_m1[2U] = (uint64_t)0x0007ef5e9cbd0c60U; - sqrt_m1[3U] = (uint64_t)0x00078595a6804c9eU; - sqrt_m1[4U] = (uint64_t)0x0002b8324804fc1dU; - fmul0(x, x, sqrt_m1); -} - -static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign) -{ - uint64_t tmp[15U] = { 0U }; - uint64_t *x2 = tmp; - uint64_t x00 = y[0U]; - uint64_t x1 = y[1U]; - uint64_t x21 = y[2U]; - uint64_t x30 = y[3U]; - uint64_t x4 = y[4U]; - bool - b = - x00 - >= (uint64_t)0x7ffffffffffedU - && x1 == (uint64_t)0x7ffffffffffffU - && x21 == (uint64_t)0x7ffffffffffffU - && x30 == (uint64_t)0x7ffffffffffffU - && x4 == (uint64_t)0x7ffffffffffffU; - bool res; - if (b) - { - res = false; - } - else - { - uint64_t tmp1[20U] = { 0U }; - uint64_t *one = tmp1; - uint64_t *y2 = tmp1 + (uint32_t)5U; - uint64_t *dyyi = tmp1 + 
(uint32_t)10U; - uint64_t *dyy = tmp1 + (uint32_t)15U; - one[0U] = (uint64_t)1U; - one[1U] = (uint64_t)0U; - one[2U] = (uint64_t)0U; - one[3U] = (uint64_t)0U; - one[4U] = (uint64_t)0U; - fsquare(y2, y); - times_d(dyy, y2); - fsum(dyy, dyy, one); - Hacl_Bignum25519_reduce_513(dyy); - Hacl_Bignum25519_inverse(dyyi, dyy); - fdifference(x2, y2, one); - fmul0(x2, x2, dyyi); - reduce(x2); - { - bool x2_is_0 = is_0(x2); - uint8_t z; - if (x2_is_0) - { - if (sign == (uint64_t)0U) - { - x[0U] = (uint64_t)0U; - x[1U] = (uint64_t)0U; - x[2U] = (uint64_t)0U; - x[3U] = (uint64_t)0U; - x[4U] = (uint64_t)0U; - z = (uint8_t)1U; - } - else - { - z = (uint8_t)0U; - } - } - else - { - z = (uint8_t)2U; - } - if (z == (uint8_t)0U) - { - res = false; - } - else if (z == (uint8_t)1U) - { - res = true; - } - else - { - uint64_t *x210 = tmp; - uint64_t *x31 = tmp + (uint32_t)5U; - uint64_t *t00 = tmp + (uint32_t)10U; - pow2_252m2(x31, x210); - fsquare(t00, x31); - fdifference(t00, t00, x210); - Hacl_Bignum25519_reduce_513(t00); - reduce(t00); - { - bool t0_is_0 = is_0(t00); - if (!t0_is_0) - { - mul_modp_sqrt_m1(x31); - } - { - uint64_t *x211 = tmp; - uint64_t *x3 = tmp + (uint32_t)5U; - uint64_t *t01 = tmp + (uint32_t)10U; - fsquare(t01, x3); - fdifference(t01, t01, x211); - Hacl_Bignum25519_reduce_513(t01); - reduce(t01); - { - bool z1 = is_0(t01); - if (z1 == false) - { - res = false; - } - else - { - uint64_t *x32 = tmp + (uint32_t)5U; - uint64_t *t0 = tmp + (uint32_t)10U; - reduce(x32); - { - uint64_t x0 = x32[0U]; - uint64_t x01 = x0 & (uint64_t)1U; - if (!(x01 == sign)) - { - t0[0U] = (uint64_t)0U; - t0[1U] = (uint64_t)0U; - t0[2U] = (uint64_t)0U; - t0[3U] = (uint64_t)0U; - t0[4U] = (uint64_t)0U; - fdifference(x32, t0, x32); - Hacl_Bignum25519_reduce_513(x32); - reduce(x32); - } - memcpy(x, x32, (uint32_t)5U * sizeof (uint64_t)); - res = true; - } - } - } - } - } - } - } - } - { - bool res0 = res; - return res0; - } -} - -bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *s) -{ - uint64_t tmp[10U] = { 0U }; - uint64_t *y = tmp; - uint64_t *x = tmp + (uint32_t)5U; - uint8_t s31 = s[31U]; - uint8_t z0 = s31 >> (uint32_t)7U; - uint64_t sign = (uint64_t)z0; - bool z; - bool res0; - bool res; - Hacl_Bignum25519_load_51(y, s); - z = recover_x(x, y, sign); - if (z == false) - { - res0 = false; - } - else - { - uint64_t *outx = out; - uint64_t *outy = out + (uint32_t)5U; - uint64_t *outz = out + (uint32_t)10U; - uint64_t *outt = out + (uint32_t)15U; - memcpy(outx, x, (uint32_t)5U * sizeof (uint64_t)); - memcpy(outy, y, (uint32_t)5U * sizeof (uint64_t)); - outz[0U] = (uint64_t)1U; - outz[1U] = (uint64_t)0U; - outz[2U] = (uint64_t)0U; - outz[3U] = (uint64_t)0U; - outz[4U] = (uint64_t)0U; - fmul0(outt, x, y); - res0 = true; - } - res = res0; - return res; -} - -void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p) -{ - uint64_t tmp[15U] = { 0U }; - uint64_t *x = tmp + (uint32_t)5U; - uint64_t *out = tmp + (uint32_t)10U; - uint64_t *zinv1 = tmp; - uint64_t *x1 = tmp + (uint32_t)5U; - uint64_t *out1 = tmp + (uint32_t)10U; - uint64_t *px = p; - uint64_t *py = p + (uint32_t)5U; - uint64_t *pz = p + (uint32_t)10U; - uint64_t x0; - uint64_t b; - uint8_t xbyte; - uint8_t o31; - Hacl_Bignum25519_inverse(zinv1, pz); - fmul0(x1, px, zinv1); - reduce(x1); - fmul0(out1, py, zinv1); - Hacl_Bignum25519_reduce_513(out1); - x0 = x[0U]; - b = x0 & (uint64_t)1U; - Hacl_Bignum25519_store_51(z, out); - xbyte = (uint8_t)b; - o31 = z[31U]; - z[31U] = o31 + (xbyte << (uint32_t)7U); -} - 
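The compress/decompress pair above implements the standard Ed25519 point encoding: the little-endian `y` coordinate with the parity of the affine `x` folded into the top bit of byte 31 (`z[31U] = o31 + (xbyte << (uint32_t)7U)` above). A minimal caller-side sketch of that encoding, assuming only the public API declared in `Hacl_EC_Ed25519.h` earlier in this diff and the buffer sizes documented there (illustrative only; not part of the deleted sources):

#include "Hacl_EC_Ed25519.h"  /* pulls in the krml compatibility types */

/* Round-trip check: compress the base point, decompress the 32-byte
   encoding, and verify the result equals the original point. All
   buffers are pairwise disjoint, as the documented preconditions require. */
static bool ed25519_compress_roundtrip(void)
{
  uint64_t p[20U] = { 0U }; /* extended coordinates (X, Y, Z, T) */
  uint64_t q[20U] = { 0U };
  uint8_t s[32U] = { 0U };  /* compressed form: 2^255 * (x % 2) + y */
  Hacl_EC_Ed25519_mk_base_point(p);
  Hacl_EC_Ed25519_point_compress(p, s);
  if (!Hacl_EC_Ed25519_point_decompress(s, q))
    return false;           /* decompression rejects invalid encodings */
  return Hacl_EC_Ed25519_point_eq(p, q); /* expected: true */
}

Note that `point_decompress` is the only fallible step here; per its documentation, a `false` return means `s` was not a valid compressed point.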
-static inline void barrett_reduction(uint64_t *z, uint64_t *t) -{ - uint64_t t0 = t[0U]; - uint64_t t1 = t[1U]; - uint64_t t2 = t[2U]; - uint64_t t3 = t[3U]; - uint64_t t4 = t[4U]; - uint64_t t5 = t[5U]; - uint64_t t6 = t[6U]; - uint64_t t7 = t[7U]; - uint64_t t8 = t[8U]; - uint64_t t9 = t[9U]; - uint64_t m00 = (uint64_t)0x12631a5cf5d3edU; - uint64_t m10 = (uint64_t)0xf9dea2f79cd658U; - uint64_t m20 = (uint64_t)0x000000000014deU; - uint64_t m30 = (uint64_t)0x00000000000000U; - uint64_t m40 = (uint64_t)0x00000010000000U; - uint64_t m0 = m00; - uint64_t m1 = m10; - uint64_t m2 = m20; - uint64_t m3 = m30; - uint64_t m4 = m40; - uint64_t m010 = (uint64_t)0x9ce5a30a2c131bU; - uint64_t m110 = (uint64_t)0x215d086329a7edU; - uint64_t m210 = (uint64_t)0xffffffffeb2106U; - uint64_t m310 = (uint64_t)0xffffffffffffffU; - uint64_t m410 = (uint64_t)0x00000fffffffffU; - uint64_t mu0 = m010; - uint64_t mu1 = m110; - uint64_t mu2 = m210; - uint64_t mu3 = m310; - uint64_t mu4 = m410; - uint64_t y_ = (t5 & (uint64_t)0xffffffU) << (uint32_t)32U; - uint64_t x_ = t4 >> (uint32_t)24U; - uint64_t z00 = x_ | y_; - uint64_t y_0 = (t6 & (uint64_t)0xffffffU) << (uint32_t)32U; - uint64_t x_0 = t5 >> (uint32_t)24U; - uint64_t z10 = x_0 | y_0; - uint64_t y_1 = (t7 & (uint64_t)0xffffffU) << (uint32_t)32U; - uint64_t x_1 = t6 >> (uint32_t)24U; - uint64_t z20 = x_1 | y_1; - uint64_t y_2 = (t8 & (uint64_t)0xffffffU) << (uint32_t)32U; - uint64_t x_2 = t7 >> (uint32_t)24U; - uint64_t z30 = x_2 | y_2; - uint64_t y_3 = (t9 & (uint64_t)0xffffffU) << (uint32_t)32U; - uint64_t x_3 = t8 >> (uint32_t)24U; - uint64_t z40 = x_3 | y_3; - uint64_t q0 = z00; - uint64_t q1 = z10; - uint64_t q2 = z20; - uint64_t q3 = z30; - uint64_t q4 = z40; - FStar_UInt128_uint128 xy000 = FStar_UInt128_mul_wide(q0, mu0); - FStar_UInt128_uint128 xy010 = FStar_UInt128_mul_wide(q0, mu1); - FStar_UInt128_uint128 xy020 = FStar_UInt128_mul_wide(q0, mu2); - FStar_UInt128_uint128 xy030 = FStar_UInt128_mul_wide(q0, mu3); - FStar_UInt128_uint128 xy040 = FStar_UInt128_mul_wide(q0, mu4); - FStar_UInt128_uint128 xy100 = FStar_UInt128_mul_wide(q1, mu0); - FStar_UInt128_uint128 xy110 = FStar_UInt128_mul_wide(q1, mu1); - FStar_UInt128_uint128 xy120 = FStar_UInt128_mul_wide(q1, mu2); - FStar_UInt128_uint128 xy130 = FStar_UInt128_mul_wide(q1, mu3); - FStar_UInt128_uint128 xy14 = FStar_UInt128_mul_wide(q1, mu4); - FStar_UInt128_uint128 xy200 = FStar_UInt128_mul_wide(q2, mu0); - FStar_UInt128_uint128 xy210 = FStar_UInt128_mul_wide(q2, mu1); - FStar_UInt128_uint128 xy220 = FStar_UInt128_mul_wide(q2, mu2); - FStar_UInt128_uint128 xy23 = FStar_UInt128_mul_wide(q2, mu3); - FStar_UInt128_uint128 xy24 = FStar_UInt128_mul_wide(q2, mu4); - FStar_UInt128_uint128 xy300 = FStar_UInt128_mul_wide(q3, mu0); - FStar_UInt128_uint128 xy310 = FStar_UInt128_mul_wide(q3, mu1); - FStar_UInt128_uint128 xy32 = FStar_UInt128_mul_wide(q3, mu2); - FStar_UInt128_uint128 xy33 = FStar_UInt128_mul_wide(q3, mu3); - FStar_UInt128_uint128 xy34 = FStar_UInt128_mul_wide(q3, mu4); - FStar_UInt128_uint128 xy400 = FStar_UInt128_mul_wide(q4, mu0); - FStar_UInt128_uint128 xy41 = FStar_UInt128_mul_wide(q4, mu1); - FStar_UInt128_uint128 xy42 = FStar_UInt128_mul_wide(q4, mu2); - FStar_UInt128_uint128 xy43 = FStar_UInt128_mul_wide(q4, mu3); - FStar_UInt128_uint128 xy44 = FStar_UInt128_mul_wide(q4, mu4); - FStar_UInt128_uint128 z01 = xy000; - FStar_UInt128_uint128 z11 = FStar_UInt128_add_mod(xy010, xy100); - FStar_UInt128_uint128 z21 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy020, xy110), xy200); - 
FStar_UInt128_uint128 - z31 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy030, xy120), xy210), - xy300); - FStar_UInt128_uint128 - z41 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy040, - xy130), - xy220), - xy310), - xy400); - FStar_UInt128_uint128 - z5 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy14, xy23), xy32), - xy41); - FStar_UInt128_uint128 z6 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42); - FStar_UInt128_uint128 z7 = FStar_UInt128_add_mod(xy34, xy43); - FStar_UInt128_uint128 z8 = xy44; - FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, (uint32_t)56U); - FStar_UInt128_uint128 c00 = carry0; - FStar_UInt128_uint128 - carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U); - uint64_t - t100 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z11, c00)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c10 = carry1; - FStar_UInt128_uint128 - carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U); - uint64_t - t101 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z21, c10)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c20 = carry2; - FStar_UInt128_uint128 - carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U); - uint64_t - t102 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z31, c20)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c30 = carry3; - FStar_UInt128_uint128 - carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U); - uint64_t - t103 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c40 = carry4; - uint64_t t410 = t103; - FStar_UInt128_uint128 - carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U); - uint64_t - t104 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c5 = carry5; - uint64_t t51 = t104; - FStar_UInt128_uint128 - carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U); - uint64_t - t105 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c6 = carry6; - uint64_t t61 = t105; - FStar_UInt128_uint128 - carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U); - uint64_t - t106 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c7 = carry7; - uint64_t t71 = t106; - FStar_UInt128_uint128 - carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U); - uint64_t - t107 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c8 = carry8; - uint64_t t81 = t107; - uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8); - uint64_t qmu4_ = t410; - uint64_t qmu5_ = t51; - uint64_t qmu6_ = t61; - uint64_t qmu7_ = t71; - uint64_t qmu8_ = t81; - uint64_t qmu9_ = t91; - uint64_t y_4 = (qmu5_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; - uint64_t x_4 = qmu4_ >> (uint32_t)40U; - uint64_t z02 = x_4 | y_4; - uint64_t y_5 = (qmu6_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; - uint64_t x_5 = qmu5_ >> (uint32_t)40U; - uint64_t z12 = x_5 | y_5; - uint64_t y_6 = (qmu7_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; - uint64_t x_6 = qmu6_ >> 
(uint32_t)40U; - uint64_t z22 = x_6 | y_6; - uint64_t y_7 = (qmu8_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; - uint64_t x_7 = qmu7_ >> (uint32_t)40U; - uint64_t z32 = x_7 | y_7; - uint64_t y_8 = (qmu9_ & (uint64_t)0xffffffffffU) << (uint32_t)16U; - uint64_t x_8 = qmu8_ >> (uint32_t)40U; - uint64_t z42 = x_8 | y_8; - uint64_t qdiv0 = z02; - uint64_t qdiv1 = z12; - uint64_t qdiv2 = z22; - uint64_t qdiv3 = z32; - uint64_t qdiv4 = z42; - uint64_t r0 = t0; - uint64_t r1 = t1; - uint64_t r2 = t2; - uint64_t r3 = t3; - uint64_t r4 = t4 & (uint64_t)0xffffffffffU; - FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(qdiv0, m0); - FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(qdiv0, m1); - FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(qdiv0, m2); - FStar_UInt128_uint128 xy03 = FStar_UInt128_mul_wide(qdiv0, m3); - FStar_UInt128_uint128 xy04 = FStar_UInt128_mul_wide(qdiv0, m4); - FStar_UInt128_uint128 xy10 = FStar_UInt128_mul_wide(qdiv1, m0); - FStar_UInt128_uint128 xy11 = FStar_UInt128_mul_wide(qdiv1, m1); - FStar_UInt128_uint128 xy12 = FStar_UInt128_mul_wide(qdiv1, m2); - FStar_UInt128_uint128 xy13 = FStar_UInt128_mul_wide(qdiv1, m3); - FStar_UInt128_uint128 xy20 = FStar_UInt128_mul_wide(qdiv2, m0); - FStar_UInt128_uint128 xy21 = FStar_UInt128_mul_wide(qdiv2, m1); - FStar_UInt128_uint128 xy22 = FStar_UInt128_mul_wide(qdiv2, m2); - FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(qdiv3, m0); - FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1); - FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0); - FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U); - uint64_t t108 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c0 = carry9; - uint64_t t010 = t108; - FStar_UInt128_uint128 - carry10 = - FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0), - (uint32_t)56U); - uint64_t - t109 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c11 = carry10; - uint64_t t110 = t109; - FStar_UInt128_uint128 - carry11 = - FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, - xy11), - xy20), - c11), - (uint32_t)56U); - uint64_t - t1010 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, - xy11), - xy20), - c11)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c21 = carry11; - uint64_t t210 = t1010; - FStar_UInt128_uint128 - carry = - FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, - xy12), - xy21), - xy30), - c21), - (uint32_t)56U); - uint64_t - t1011 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, - xy12), - xy21), - xy30), - c21)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c31 = carry; - uint64_t t310 = t1011; - uint64_t - t411 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy04, - xy13), - xy22), - xy31), - xy40), - c31)) - & (uint64_t)0xffffffffffU; - uint64_t qmul0 = t010; - uint64_t qmul1 = t110; - uint64_t qmul2 = t210; - uint64_t qmul3 = t310; - uint64_t qmul4 = t411; - uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U; - uint64_t t1012 = (b5 << (uint32_t)56U) + r0 - qmul0; - uint64_t c1 = b5; - 
uint64_t t011 = t1012; - uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U; - uint64_t t1013 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1); - uint64_t c2 = b6; - uint64_t t111 = t1013; - uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U; - uint64_t t1014 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2); - uint64_t c3 = b7; - uint64_t t211 = t1014; - uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U; - uint64_t t1015 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3); - uint64_t c4 = b8; - uint64_t t311 = t1015; - uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U; - uint64_t t1016 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4); - uint64_t t412 = t1016; - uint64_t s0 = t011; - uint64_t s1 = t111; - uint64_t s2 = t211; - uint64_t s3 = t311; - uint64_t s4 = t412; - uint64_t m01 = (uint64_t)0x12631a5cf5d3edU; - uint64_t m11 = (uint64_t)0xf9dea2f79cd658U; - uint64_t m21 = (uint64_t)0x000000000014deU; - uint64_t m31 = (uint64_t)0x00000000000000U; - uint64_t m41 = (uint64_t)0x00000010000000U; - uint64_t y0 = m01; - uint64_t y1 = m11; - uint64_t y2 = m21; - uint64_t y3 = m31; - uint64_t y4 = m41; - uint64_t b10 = (s0 - y0) >> (uint32_t)63U; - uint64_t t1017 = (b10 << (uint32_t)56U) + s0 - y0; - uint64_t b0 = b10; - uint64_t t01 = t1017; - uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U; - uint64_t t1018 = (b11 << (uint32_t)56U) + s1 - (y1 + b0); - uint64_t b1 = b11; - uint64_t t11 = t1018; - uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U; - uint64_t t1019 = (b12 << (uint32_t)56U) + s2 - (y2 + b1); - uint64_t b2 = b12; - uint64_t t21 = t1019; - uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U; - uint64_t t1020 = (b13 << (uint32_t)56U) + s3 - (y3 + b2); - uint64_t b3 = b13; - uint64_t t31 = t1020; - uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U; - uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3); - uint64_t b4 = b; - uint64_t t41 = t10; - uint64_t mask = b4 - (uint64_t)1U; - uint64_t z03 = s0 ^ (mask & (s0 ^ t01)); - uint64_t z13 = s1 ^ (mask & (s1 ^ t11)); - uint64_t z23 = s2 ^ (mask & (s2 ^ t21)); - uint64_t z33 = s3 ^ (mask & (s3 ^ t31)); - uint64_t z43 = s4 ^ (mask & (s4 ^ t41)); - uint64_t z04 = z03; - uint64_t z14 = z13; - uint64_t z24 = z23; - uint64_t z34 = z33; - uint64_t z44 = z43; - uint64_t o0 = z04; - uint64_t o1 = z14; - uint64_t o2 = z24; - uint64_t o3 = z34; - uint64_t o4 = z44; - uint64_t z0 = o0; - uint64_t z1 = o1; - uint64_t z2 = o2; - uint64_t z3 = o3; - uint64_t z4 = o4; - z[0U] = z0; - z[1U] = z1; - z[2U] = z2; - z[3U] = z3; - z[4U] = z4; -} - -static inline void mul_modq(uint64_t *out, uint64_t *x, uint64_t *y) -{ - uint64_t tmp[10U] = { 0U }; - uint64_t x0 = x[0U]; - uint64_t x1 = x[1U]; - uint64_t x2 = x[2U]; - uint64_t x3 = x[3U]; - uint64_t x4 = x[4U]; - uint64_t y0 = y[0U]; - uint64_t y1 = y[1U]; - uint64_t y2 = y[2U]; - uint64_t y3 = y[3U]; - uint64_t y4 = y[4U]; - FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(x0, y0); - FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(x0, y1); - FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(x0, y2); - FStar_UInt128_uint128 xy03 = FStar_UInt128_mul_wide(x0, y3); - FStar_UInt128_uint128 xy04 = FStar_UInt128_mul_wide(x0, y4); - FStar_UInt128_uint128 xy10 = FStar_UInt128_mul_wide(x1, y0); - FStar_UInt128_uint128 xy11 = FStar_UInt128_mul_wide(x1, y1); - FStar_UInt128_uint128 xy12 = FStar_UInt128_mul_wide(x1, y2); - FStar_UInt128_uint128 xy13 = FStar_UInt128_mul_wide(x1, y3); - FStar_UInt128_uint128 xy14 = FStar_UInt128_mul_wide(x1, y4); - FStar_UInt128_uint128 xy20 = FStar_UInt128_mul_wide(x2, y0); - FStar_UInt128_uint128 xy21 = 
FStar_UInt128_mul_wide(x2, y1); - FStar_UInt128_uint128 xy22 = FStar_UInt128_mul_wide(x2, y2); - FStar_UInt128_uint128 xy23 = FStar_UInt128_mul_wide(x2, y3); - FStar_UInt128_uint128 xy24 = FStar_UInt128_mul_wide(x2, y4); - FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(x3, y0); - FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(x3, y1); - FStar_UInt128_uint128 xy32 = FStar_UInt128_mul_wide(x3, y2); - FStar_UInt128_uint128 xy33 = FStar_UInt128_mul_wide(x3, y3); - FStar_UInt128_uint128 xy34 = FStar_UInt128_mul_wide(x3, y4); - FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(x4, y0); - FStar_UInt128_uint128 xy41 = FStar_UInt128_mul_wide(x4, y1); - FStar_UInt128_uint128 xy42 = FStar_UInt128_mul_wide(x4, y2); - FStar_UInt128_uint128 xy43 = FStar_UInt128_mul_wide(x4, y3); - FStar_UInt128_uint128 xy44 = FStar_UInt128_mul_wide(x4, y4); - FStar_UInt128_uint128 z00 = xy00; - FStar_UInt128_uint128 z10 = FStar_UInt128_add_mod(xy01, xy10); - FStar_UInt128_uint128 z20 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, xy11), xy20); - FStar_UInt128_uint128 - z30 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, xy12), xy21), - xy30); - FStar_UInt128_uint128 - z40 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy04, - xy13), - xy22), - xy31), - xy40); - FStar_UInt128_uint128 - z50 = - FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy14, xy23), xy32), - xy41); - FStar_UInt128_uint128 z60 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42); - FStar_UInt128_uint128 z70 = FStar_UInt128_add_mod(xy34, xy43); - FStar_UInt128_uint128 z80 = xy44; - FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, (uint32_t)56U); - uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c0 = carry0; - uint64_t t0 = t10; - FStar_UInt128_uint128 - carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), (uint32_t)56U); - uint64_t - t11 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c1 = carry1; - uint64_t t1 = t11; - FStar_UInt128_uint128 - carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), (uint32_t)56U); - uint64_t - t12 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c2 = carry2; - uint64_t t2 = t12; - FStar_UInt128_uint128 - carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), (uint32_t)56U); - uint64_t - t13 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c3 = carry3; - uint64_t t3 = t13; - FStar_UInt128_uint128 - carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), (uint32_t)56U); - uint64_t - t14 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c4 = carry4; - uint64_t t4 = t14; - FStar_UInt128_uint128 - carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), (uint32_t)56U); - uint64_t - t15 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c5 = carry5; - uint64_t t5 = t15; - FStar_UInt128_uint128 - carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), (uint32_t)56U); - uint64_t - t16 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5)) - & (uint64_t)0xffffffffffffffU; - 
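Annotation: mul_modq forms the full 5x5 schoolbook product in radix 2^56; each column sum xy00..xy44 adds at most five products of 56-bit limbs and so stays below 2^115, comfortably inside 128 bits, and the carry0..carry chain keeps 56 bits per limb while pushing the rest upward into the ten-limb result handed to barrett_reduction. A minimal model of the same pattern, assuming a compiler with a native unsigned __int128 (a type C89 lacks, which is presumably why this distribution routes through the FStar_UInt128_* wrappers instead); the function name is mine:

    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* z (10 limbs) = x * y, all limbs in radix 2^56; illustrative only */
    static void mul_10_limbs(uint64_t z[10], const uint64_t x[5], const uint64_t y[5])
    {
      u128 col[9] = { 0 };                       /* the xy.. column sums */
      u128 c = 0;                                /* running carry        */
      const uint64_t mask = 0xffffffffffffffULL; /* 2^56 - 1             */
      int i, j;
      for (i = 0; i < 5; i++)
        for (j = 0; j < 5; j++)
          col[i + j] += (u128)x[i] * y[j];
      for (i = 0; i < 9; i++)                    /* carry propagation    */
      {
        c += col[i];
        z[i] = (uint64_t)c & mask;
        c >>= 56;
      }
      z[9] = (uint64_t)c;                        /* top limb, cf. t9     */
    }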
FStar_UInt128_uint128 c6 = carry6; - uint64_t t6 = t16; - FStar_UInt128_uint128 - carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), (uint32_t)56U); - uint64_t - t17 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c7 = carry7; - uint64_t t7 = t17; - FStar_UInt128_uint128 - carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), (uint32_t)56U); - uint64_t - t = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7)) - & (uint64_t)0xffffffffffffffU; - FStar_UInt128_uint128 c8 = carry; - uint64_t t8 = t; - uint64_t t9 = FStar_UInt128_uint128_to_uint64(c8); - uint64_t z0 = t0; - uint64_t z1 = t1; - uint64_t z2 = t2; - uint64_t z3 = t3; - uint64_t z4 = t4; - uint64_t z5 = t5; - uint64_t z6 = t6; - uint64_t z7 = t7; - uint64_t z8 = t8; - uint64_t z9 = t9; - tmp[0U] = z0; - tmp[1U] = z1; - tmp[2U] = z2; - tmp[3U] = z3; - tmp[4U] = z4; - tmp[5U] = z5; - tmp[6U] = z6; - tmp[7U] = z7; - tmp[8U] = z8; - tmp[9U] = z9; - barrett_reduction(out, tmp); -} - -static inline void add_modq(uint64_t *out, uint64_t *x, uint64_t *y) -{ - uint64_t x0 = x[0U]; - uint64_t x1 = x[1U]; - uint64_t x2 = x[2U]; - uint64_t x3 = x[3U]; - uint64_t x4 = x[4U]; - uint64_t y0 = y[0U]; - uint64_t y1 = y[1U]; - uint64_t y2 = y[2U]; - uint64_t y3 = y[3U]; - uint64_t y4 = y[4U]; - uint64_t carry0 = (x0 + y0) >> (uint32_t)56U; - uint64_t t0 = (x0 + y0) & (uint64_t)0xffffffffffffffU; - uint64_t t00 = t0; - uint64_t c0 = carry0; - uint64_t carry1 = (x1 + y1 + c0) >> (uint32_t)56U; - uint64_t t1 = (x1 + y1 + c0) & (uint64_t)0xffffffffffffffU; - uint64_t t10 = t1; - uint64_t c1 = carry1; - uint64_t carry2 = (x2 + y2 + c1) >> (uint32_t)56U; - uint64_t t2 = (x2 + y2 + c1) & (uint64_t)0xffffffffffffffU; - uint64_t t20 = t2; - uint64_t c2 = carry2; - uint64_t carry = (x3 + y3 + c2) >> (uint32_t)56U; - uint64_t t3 = (x3 + y3 + c2) & (uint64_t)0xffffffffffffffU; - uint64_t t30 = t3; - uint64_t c3 = carry; - uint64_t t4 = x4 + y4 + c3; - uint64_t m0 = (uint64_t)0x12631a5cf5d3edU; - uint64_t m1 = (uint64_t)0xf9dea2f79cd658U; - uint64_t m2 = (uint64_t)0x000000000014deU; - uint64_t m3 = (uint64_t)0x00000000000000U; - uint64_t m4 = (uint64_t)0x00000010000000U; - uint64_t y01 = m0; - uint64_t y11 = m1; - uint64_t y21 = m2; - uint64_t y31 = m3; - uint64_t y41 = m4; - uint64_t b5 = (t00 - y01) >> (uint32_t)63U; - uint64_t t5 = (b5 << (uint32_t)56U) + t00 - y01; - uint64_t b0 = b5; - uint64_t t01 = t5; - uint64_t b6 = (t10 - (y11 + b0)) >> (uint32_t)63U; - uint64_t t6 = (b6 << (uint32_t)56U) + t10 - (y11 + b0); - uint64_t b1 = b6; - uint64_t t11 = t6; - uint64_t b7 = (t20 - (y21 + b1)) >> (uint32_t)63U; - uint64_t t7 = (b7 << (uint32_t)56U) + t20 - (y21 + b1); - uint64_t b2 = b7; - uint64_t t21 = t7; - uint64_t b8 = (t30 - (y31 + b2)) >> (uint32_t)63U; - uint64_t t8 = (b8 << (uint32_t)56U) + t30 - (y31 + b2); - uint64_t b3 = b8; - uint64_t t31 = t8; - uint64_t b = (t4 - (y41 + b3)) >> (uint32_t)63U; - uint64_t t = (b << (uint32_t)56U) + t4 - (y41 + b3); - uint64_t b4 = b; - uint64_t t41 = t; - uint64_t mask = b4 - (uint64_t)1U; - uint64_t z00 = t00 ^ (mask & (t00 ^ t01)); - uint64_t z10 = t10 ^ (mask & (t10 ^ t11)); - uint64_t z20 = t20 ^ (mask & (t20 ^ t21)); - uint64_t z30 = t30 ^ (mask & (t30 ^ t31)); - uint64_t z40 = t4 ^ (mask & (t4 ^ t41)); - uint64_t z01 = z00; - uint64_t z11 = z10; - uint64_t z21 = z20; - uint64_t z31 = z30; - uint64_t z41 = z40; - uint64_t o0 = z01; - uint64_t o1 = z11; - uint64_t o2 = z21; - uint64_t o3 = z31; - 
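Annotation: the mask arithmetic just above (mask = b4 - 1U, then z ^ (mask & (z ^ t))) is a branch-free conditional subtraction shared by add_modq and barrett_reduction: b4 is the final borrow of the trial subtraction of q, so the mask is all-ones exactly when the value was at least q and the subtracted result should be kept. The select in isolation, under a name of my choosing:

    #include <stdint.h>

    /* borrow is 0 or 1; returns t when borrow == 0, s otherwise, branch-free */
    static uint64_t ct_select(uint64_t borrow, uint64_t s, uint64_t t)
    {
      uint64_t mask = borrow - (uint64_t)1U; /* 0 -> 0xFF..FF, 1 -> 0x00..00 */
      return s ^ (mask & (s ^ t));
    }

Keeping the selection free of data-dependent branches is what makes these scalar operations constant-time with respect to the secret operands.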
uint64_t o4 = z41; - uint64_t z0 = o0; - uint64_t z1 = o1; - uint64_t z2 = o2; - uint64_t z3 = o3; - uint64_t z4 = o4; - out[0U] = z0; - out[1U] = z1; - out[2U] = z2; - out[3U] = z3; - out[4U] = z4; -} - -static inline bool gte_q(uint64_t *s) -{ - uint64_t s0 = s[0U]; - uint64_t s1 = s[1U]; - uint64_t s2 = s[2U]; - uint64_t s3 = s[3U]; - uint64_t s4 = s[4U]; - if (s4 > (uint64_t)0x00000010000000U) - { - return true; - } - if (s4 < (uint64_t)0x00000010000000U) - { - return false; - } - if (s3 > (uint64_t)0x00000000000000U) - { - return true; - } - if (s2 > (uint64_t)0x000000000014deU) - { - return true; - } - if (s2 < (uint64_t)0x000000000014deU) - { - return false; - } - if (s1 > (uint64_t)0xf9dea2f79cd658U) - { - return true; - } - if (s1 < (uint64_t)0xf9dea2f79cd658U) - { - return false; - } - if (s0 >= (uint64_t)0x12631a5cf5d3edU) - { - return true; - } - return false; -} - -static inline bool eq(uint64_t *a, uint64_t *b) -{ - uint64_t a0 = a[0U]; - uint64_t a1 = a[1U]; - uint64_t a2 = a[2U]; - uint64_t a3 = a[3U]; - uint64_t a4 = a[4U]; - uint64_t b0 = b[0U]; - uint64_t b1 = b[1U]; - uint64_t b2 = b[2U]; - uint64_t b3 = b[3U]; - uint64_t b4 = b[4U]; - return a0 == b0 && a1 == b1 && a2 == b2 && a3 == b3 && a4 == b4; -} - -bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q) -{ - uint64_t tmp[20U] = { 0U }; - uint64_t *pxqz = tmp; - uint64_t *qxpz = tmp + (uint32_t)5U; - bool b; - bool res; - fmul0(pxqz, p, q + (uint32_t)10U); - reduce(pxqz); - fmul0(qxpz, q, p + (uint32_t)10U); - reduce(qxpz); - b = eq(pxqz, qxpz); - if (b) - { - uint64_t *pyqz = tmp + (uint32_t)10U; - uint64_t *qypz = tmp + (uint32_t)15U; - fmul0(pyqz, p + (uint32_t)5U, q + (uint32_t)10U); - reduce(pyqz); - fmul0(qypz, q + (uint32_t)5U, p + (uint32_t)10U); - reduce(qypz); - res = eq(pyqz, qypz); - } - else - { - res = false; - } - return res; -} - -void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q) -{ - uint64_t tmp[30U] = { 0U }; - uint64_t *tmp10 = tmp; - uint64_t *tmp20 = tmp + (uint32_t)5U; - uint64_t *tmp30 = tmp + (uint32_t)10U; - uint64_t *tmp40 = tmp + (uint32_t)15U; - uint64_t *x1 = p; - uint64_t *y1 = p + (uint32_t)5U; - uint64_t *x2 = q; - uint64_t *y2 = q + (uint32_t)5U; - uint64_t *tmp1; - uint64_t *tmp2; - uint64_t *tmp3; - uint64_t *tmp4; - uint64_t *tmp5; - uint64_t *tmp6; - uint64_t *z1; - uint64_t *t1; - uint64_t *z2; - uint64_t *t2; - uint64_t *tmp_g; - uint64_t *tmp_h; - uint64_t *tmp_e; - uint64_t *tmp_f; - uint64_t *x3; - uint64_t *y3; - uint64_t *z3; - uint64_t *t3; - fdifference(tmp10, y1, x1); - fdifference(tmp20, y2, x2); - fmul0(tmp30, tmp10, tmp20); - fsum(tmp10, y1, x1); - fsum(tmp20, y2, x2); - fmul0(tmp40, tmp10, tmp20); - tmp1 = tmp; - tmp2 = tmp + (uint32_t)5U; - tmp3 = tmp + (uint32_t)10U; - tmp4 = tmp + (uint32_t)15U; - tmp5 = tmp + (uint32_t)20U; - tmp6 = tmp + (uint32_t)25U; - z1 = p + (uint32_t)10U; - t1 = p + (uint32_t)15U; - z2 = q + (uint32_t)10U; - t2 = q + (uint32_t)15U; - times_2d(tmp1, t1); - fmul0(tmp1, tmp1, t2); - times_2(tmp2, z1); - fmul0(tmp2, tmp2, z2); - fdifference(tmp5, tmp4, tmp3); - fdifference(tmp6, tmp2, tmp1); - fsum(tmp1, tmp2, tmp1); - fsum(tmp2, tmp4, tmp3); - tmp_g = tmp; - tmp_h = tmp + (uint32_t)5U; - tmp_e = tmp + (uint32_t)20U; - tmp_f = tmp + (uint32_t)25U; - x3 = out; - y3 = out + (uint32_t)5U; - z3 = out + (uint32_t)10U; - t3 = out + (uint32_t)15U; - fmul0(x3, tmp_e, tmp_f); - fmul0(y3, tmp_g, tmp_h); - fmul0(t3, tmp_e, tmp_h); - fmul0(z3, tmp_f, tmp_g); -} - -void 
Hacl_Impl_Ed25519_PointNegate_point_negate(uint64_t *p, uint64_t *out) -{ - uint64_t zero[5U] = { 0U }; - uint64_t *x; - uint64_t *y; - uint64_t *z; - uint64_t *t; - uint64_t *x1; - uint64_t *y1; - uint64_t *z1; - uint64_t *t1; - zero[0U] = (uint64_t)0U; - zero[1U] = (uint64_t)0U; - zero[2U] = (uint64_t)0U; - zero[3U] = (uint64_t)0U; - zero[4U] = (uint64_t)0U; - x = p; - y = p + (uint32_t)5U; - z = p + (uint32_t)10U; - t = p + (uint32_t)15U; - x1 = out; - y1 = out + (uint32_t)5U; - z1 = out + (uint32_t)10U; - t1 = out + (uint32_t)15U; - fdifference(x1, zero, x); - Hacl_Bignum25519_reduce_513(x1); - memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t)); - memcpy(z1, z, (uint32_t)5U * sizeof (uint64_t)); - fdifference(t1, zero, t); - Hacl_Bignum25519_reduce_513(t1); -} - -void Hacl_Impl_Ed25519_Ladder_make_point_inf(uint64_t *b) -{ - uint64_t *x = b; - uint64_t *y = b + (uint32_t)5U; - uint64_t *z = b + (uint32_t)10U; - uint64_t *t = b + (uint32_t)15U; - x[0U] = (uint64_t)0U; - x[1U] = (uint64_t)0U; - x[2U] = (uint64_t)0U; - x[3U] = (uint64_t)0U; - x[4U] = (uint64_t)0U; - y[0U] = (uint64_t)1U; - y[1U] = (uint64_t)0U; - y[2U] = (uint64_t)0U; - y[3U] = (uint64_t)0U; - y[4U] = (uint64_t)0U; - z[0U] = (uint64_t)1U; - z[1U] = (uint64_t)0U; - z[2U] = (uint64_t)0U; - z[3U] = (uint64_t)0U; - z[4U] = (uint64_t)0U; - t[0U] = (uint64_t)0U; - t[1U] = (uint64_t)0U; - t[2U] = (uint64_t)0U; - t[3U] = (uint64_t)0U; - t[4U] = (uint64_t)0U; -} - -void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *result, uint8_t *scalar, uint64_t *q) -{ - uint64_t bscalar[4U] = { 0U }; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = bscalar; - uint8_t *bj = scalar + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - uint64_t table[320U] = { 0U }; - uint64_t tmp[20U] = { 0U }; - uint64_t *t0 = table; - uint64_t *t1 = table + (uint32_t)20U; - Hacl_Impl_Ed25519_Ladder_make_point_inf(t0); - memcpy(t1, q, (uint32_t)20U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)20U; - Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U, - tmp, - (uint32_t)20U * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U; - Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q, t2); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U, - tmp, - (uint32_t)20U * sizeof (uint64_t));); - Hacl_Impl_Ed25519_Ladder_make_point_inf(result); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - Hacl_Impl_Ed25519_PointDouble_point_double(result, result);); - { - uint32_t bk = (uint32_t)256U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = bscalar[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < (uint32_t)4U && (uint32_t)0U < j) - { - ite = p1 | bscalar[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - uint64_t a_bits_l[20U] = { 0U }; - memcpy(a_bits_l, table, (uint32_t)20U * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i2 + (uint32_t)1U)); - uint64_t *res_j = table 
+ (i2 + (uint32_t)1U) * (uint32_t)20U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)20U; i++) - { - uint64_t *os = a_bits_l; - uint64_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x; - } - }); - Hacl_Impl_Ed25519_PointAdd_point_add(result, result, a_bits_l); - } - } - } - } - } -} - -static inline void point_mul_g(uint64_t *result, uint8_t *scalar) -{ - uint64_t g[20U] = { 0U }; - uint64_t *gx = g; - uint64_t *gy = g + (uint32_t)5U; - uint64_t *gz = g + (uint32_t)10U; - uint64_t *gt = g + (uint32_t)15U; - gx[0U] = (uint64_t)0x00062d608f25d51aU; - gx[1U] = (uint64_t)0x000412a4b4f6592aU; - gx[2U] = (uint64_t)0x00075b7171a4b31dU; - gx[3U] = (uint64_t)0x0001ff60527118feU; - gx[4U] = (uint64_t)0x000216936d3cd6e5U; - gy[0U] = (uint64_t)0x0006666666666658U; - gy[1U] = (uint64_t)0x0004ccccccccccccU; - gy[2U] = (uint64_t)0x0001999999999999U; - gy[3U] = (uint64_t)0x0003333333333333U; - gy[4U] = (uint64_t)0x0006666666666666U; - gz[0U] = (uint64_t)1U; - gz[1U] = (uint64_t)0U; - gz[2U] = (uint64_t)0U; - gz[3U] = (uint64_t)0U; - gz[4U] = (uint64_t)0U; - gt[0U] = (uint64_t)0x00068ab3a5b7dda3U; - gt[1U] = (uint64_t)0x00000eea2a5eadbbU; - gt[2U] = (uint64_t)0x0002af8df483c27eU; - gt[3U] = (uint64_t)0x000332b375274732U; - gt[4U] = (uint64_t)0x00067875f0fd78b7U; - Hacl_Impl_Ed25519_Ladder_point_mul(result, scalar, g); -} - -static inline void -point_mul_double_vartime( - uint64_t *result, - uint8_t *scalar1, - uint64_t *q1, - uint8_t *scalar2, - uint64_t *q2 -) -{ - uint64_t bscalar1[4U] = { 0U }; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = bscalar1; - uint8_t *bj = scalar1 + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - uint64_t bscalar2[4U] = { 0U }; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = bscalar2; - uint8_t *bj = scalar2 + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - uint64_t table1[320U] = { 0U }; - uint64_t tmp0[20U] = { 0U }; - uint64_t *t00 = table1; - uint64_t *t10 = table1 + (uint32_t)20U; - Hacl_Impl_Ed25519_Ladder_make_point_inf(t00); - memcpy(t10, q1, (uint32_t)20U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table1 + (i + (uint32_t)1U) * (uint32_t)20U; - Hacl_Impl_Ed25519_PointDouble_point_double(tmp0, t11); - memcpy(table1 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U, - tmp0, - (uint32_t)20U * sizeof (uint64_t)); - uint64_t *t2 = table1 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U; - Hacl_Impl_Ed25519_PointAdd_point_add(tmp0, q1, t2); - memcpy(table1 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U, - tmp0, - (uint32_t)20U * sizeof (uint64_t));); - { - uint64_t table2[320U] = { 0U }; - uint64_t tmp[20U] = { 0U }; - uint64_t *t0 = table2; - uint64_t *t1 = table2 + (uint32_t)20U; - Hacl_Impl_Ed25519_Ladder_make_point_inf(t0); - memcpy(t1, q2, (uint32_t)20U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)20U; - Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11); - memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U, - tmp, - (uint32_t)20U * sizeof (uint64_t)); - uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U; - Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q2, t2); - memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U, - tmp, - (uint32_t)20U * 
sizeof (uint64_t));); - Hacl_Impl_Ed25519_Ladder_make_point_inf(result); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - Hacl_Impl_Ed25519_PointDouble_point_double(result, result);); - { - uint32_t bk = (uint32_t)256U; - uint64_t mask_l0 = (uint64_t)15U; - uint32_t i10 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j0 = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p10 = bscalar2[i10] >> j0; - uint64_t ite0; - if (i10 + (uint32_t)1U < (uint32_t)4U && (uint32_t)0U < j0) - { - ite0 = p10 | bscalar2[i10 + (uint32_t)1U] << ((uint32_t)64U - j0); - } - else - { - ite0 = p10; - } - { - uint64_t bits_l = ite0 & mask_l0; - uint64_t a_bits_l0[20U] = { 0U }; - uint32_t bits_l320 = (uint32_t)bits_l; - uint64_t *a_bits_l1 = table2 + bits_l320 * (uint32_t)20U; - memcpy(a_bits_l0, a_bits_l1, (uint32_t)20U * sizeof (uint64_t)); - Hacl_Impl_Ed25519_PointAdd_point_add(result, result, a_bits_l0); - { - uint32_t bk0 = (uint32_t)256U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk0 - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk0 - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = bscalar1[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < (uint32_t)4U && (uint32_t)0U < j) - { - ite = p1 | bscalar1[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l0 = ite & mask_l; - uint64_t a_bits_l[20U] = { 0U }; - uint32_t bits_l32 = (uint32_t)bits_l0; - uint64_t *a_bits_l10 = table1 + bits_l32 * (uint32_t)20U; - memcpy(a_bits_l, a_bits_l10, (uint32_t)20U * sizeof (uint64_t)); - Hacl_Impl_Ed25519_PointAdd_point_add(result, result, a_bits_l); - } - } - } - } - } - } - } - } - } -} - -static inline void -point_mul_g_double_vartime(uint64_t *result, uint8_t *scalar1, uint8_t *scalar2, uint64_t *q2) -{ - uint64_t g[20U] = { 0U }; - uint64_t *gx = g; - uint64_t *gy = g + (uint32_t)5U; - uint64_t *gz = g + (uint32_t)10U; - uint64_t *gt = g + (uint32_t)15U; - gx[0U] = (uint64_t)0x00062d608f25d51aU; - gx[1U] = (uint64_t)0x000412a4b4f6592aU; - gx[2U] = (uint64_t)0x00075b7171a4b31dU; - gx[3U] = (uint64_t)0x0001ff60527118feU; - gx[4U] = (uint64_t)0x000216936d3cd6e5U; - gy[0U] = (uint64_t)0x0006666666666658U; - gy[1U] = (uint64_t)0x0004ccccccccccccU; - gy[2U] = (uint64_t)0x0001999999999999U; - gy[3U] = (uint64_t)0x0003333333333333U; - gy[4U] = (uint64_t)0x0006666666666666U; - gz[0U] = (uint64_t)1U; - gz[1U] = (uint64_t)0U; - gz[2U] = (uint64_t)0U; - gz[3U] = (uint64_t)0U; - gz[4U] = (uint64_t)0U; - gt[0U] = (uint64_t)0x00068ab3a5b7dda3U; - gt[1U] = (uint64_t)0x00000eea2a5eadbbU; - gt[2U] = (uint64_t)0x0002af8df483c27eU; - gt[3U] = (uint64_t)0x000332b375274732U; - gt[4U] = (uint64_t)0x00067875f0fd78b7U; - point_mul_double_vartime(result, scalar1, g, scalar2, q2); -} - -static inline void -point_negate_mul_double_g_vartime( - uint64_t *result, - uint8_t *scalar1, - uint8_t *scalar2, - uint64_t *q2 -) -{ - uint64_t q2_neg[20U] = { 0U }; - Hacl_Impl_Ed25519_PointNegate_point_negate(q2, q2_neg); - point_mul_g_double_vartime(result, scalar1, scalar2, q2_neg); -} - -static inline void store_56(uint8_t *out, uint64_t *b) -{ - uint64_t b0 = b[0U]; - uint64_t b1 = b[1U]; - uint64_t b2 = b[2U]; - uint64_t b3 = b[3U]; - uint64_t b4 = b[4U]; - uint32_t b4_ = (uint32_t)b4; - uint8_t *b80 = out; - uint8_t *b81; - uint8_t *b82; - uint8_t *b8; - store64_le(b80, b0); - b81 = out + (uint32_t)7U; - store64_le(b81, b1); - b82 = 
out + (uint32_t)14U; - store64_le(b82, b2); - b8 = out + (uint32_t)21U; - store64_le(b8, b3); - store32_le(out + (uint32_t)28U, b4_); -} - -static inline void load_64_bytes(uint64_t *out, uint8_t *b) -{ - uint8_t *b80 = b; - uint64_t u = load64_le(b80); - uint64_t z = u; - uint64_t b0 = z & (uint64_t)0xffffffffffffffU; - uint8_t *b81 = b + (uint32_t)7U; - uint64_t u0 = load64_le(b81); - uint64_t z0 = u0; - uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU; - uint8_t *b82 = b + (uint32_t)14U; - uint64_t u1 = load64_le(b82); - uint64_t z1 = u1; - uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU; - uint8_t *b83 = b + (uint32_t)21U; - uint64_t u2 = load64_le(b83); - uint64_t z2 = u2; - uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU; - uint8_t *b84 = b + (uint32_t)28U; - uint64_t u3 = load64_le(b84); - uint64_t z3 = u3; - uint64_t b4 = z3 & (uint64_t)0xffffffffffffffU; - uint8_t *b85 = b + (uint32_t)35U; - uint64_t u4 = load64_le(b85); - uint64_t z4 = u4; - uint64_t b5 = z4 & (uint64_t)0xffffffffffffffU; - uint8_t *b86 = b + (uint32_t)42U; - uint64_t u5 = load64_le(b86); - uint64_t z5 = u5; - uint64_t b6 = z5 & (uint64_t)0xffffffffffffffU; - uint8_t *b87 = b + (uint32_t)49U; - uint64_t u6 = load64_le(b87); - uint64_t z6 = u6; - uint64_t b7 = z6 & (uint64_t)0xffffffffffffffU; - uint8_t *b8 = b + (uint32_t)56U; - uint64_t u7 = load64_le(b8); - uint64_t z7 = u7; - uint64_t b88 = z7 & (uint64_t)0xffffffffffffffU; - uint8_t b63 = b[63U]; - uint64_t b9 = (uint64_t)b63; - out[0U] = b0; - out[1U] = b1; - out[2U] = b2; - out[3U] = b3; - out[4U] = b4; - out[5U] = b5; - out[6U] = b6; - out[7U] = b7; - out[8U] = b88; - out[9U] = b9; -} - -static inline void load_32_bytes(uint64_t *out, uint8_t *b) -{ - uint8_t *b80 = b; - uint64_t u0 = load64_le(b80); - uint64_t z = u0; - uint64_t b0 = z & (uint64_t)0xffffffffffffffU; - uint8_t *b81 = b + (uint32_t)7U; - uint64_t u1 = load64_le(b81); - uint64_t z0 = u1; - uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU; - uint8_t *b82 = b + (uint32_t)14U; - uint64_t u2 = load64_le(b82); - uint64_t z1 = u2; - uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU; - uint8_t *b8 = b + (uint32_t)21U; - uint64_t u3 = load64_le(b8); - uint64_t z2 = u3; - uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU; - uint32_t u = load32_le(b + (uint32_t)28U); - uint32_t b4 = u; - uint64_t b41 = (uint64_t)b4; - out[0U] = b0; - out[1U] = b1; - out[2U] = b2; - out[3U] = b3; - out[4U] = b41; -} - -static inline void sha512_pre_msg(uint8_t *hash, uint8_t *prefix, uint32_t len, uint8_t *input) -{ - uint8_t buf[128U] = { 0U }; - uint64_t block_state[8U] = { 0U }; - Hacl_Streaming_SHA2_state_sha2_384 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - { - Hacl_Streaming_SHA2_state_sha2_384 p = s; - Hacl_Streaming_SHA2_state_sha2_384 *st; - Hacl_Hash_Core_SHA2_init_512(block_state); - st = &p; - Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U); - Hacl_Streaming_SHA2_update_512(st, input, len); - Hacl_Streaming_SHA2_finish_512(st, hash); - } -} - -static inline void -sha512_pre_pre2_msg( - uint8_t *hash, - uint8_t *prefix, - uint8_t *prefix2, - uint32_t len, - uint8_t *input -) -{ - uint8_t buf[128U] = { 0U }; - uint64_t block_state[8U] = { 0U }; - Hacl_Streaming_SHA2_state_sha2_384 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - { - Hacl_Streaming_SHA2_state_sha2_384 p = s; - Hacl_Streaming_SHA2_state_sha2_384 *st; - Hacl_Hash_Core_SHA2_init_512(block_state); - st = &p; - Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U); - 
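/* Annotation (not in the diff): the surrounding init/update/finish calls are
   the stack-allocated streaming-SHA2 pattern shared by sha512_pre_msg and
   sha512_pre_pre2_msg: block_state plus a 128-byte buffer live on the stack,
   one update per input fragment, no heap allocation. Hashing two fixed
   32-byte prefixes ahead of the message is how the Ed25519 hash
   H(R || A || M) is assembled further down, with R the 32-byte commitment
   and A the 32-byte compressed public key. */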
Hacl_Streaming_SHA2_update_512(st, prefix2, (uint32_t)32U); - Hacl_Streaming_SHA2_update_512(st, input, len); - Hacl_Streaming_SHA2_finish_512(st, hash); - } -} - -static inline void -sha512_modq_pre(uint64_t *out, uint8_t *prefix, uint32_t len, uint8_t *input) -{ - uint64_t tmp[10U] = { 0U }; - uint8_t hash[64U] = { 0U }; - sha512_pre_msg(hash, prefix, len, input); - load_64_bytes(tmp, hash); - barrett_reduction(out, tmp); -} - -static inline void -sha512_modq_pre_pre2( - uint64_t *out, - uint8_t *prefix, - uint8_t *prefix2, - uint32_t len, - uint8_t *input -) -{ - uint64_t tmp[10U] = { 0U }; - uint8_t hash[64U] = { 0U }; - sha512_pre_pre2_msg(hash, prefix, prefix2, len, input); - load_64_bytes(tmp, hash); - barrett_reduction(out, tmp); -} - -static inline void point_mul_g_compress(uint8_t *out, uint8_t *s) -{ - uint64_t tmp[20U] = { 0U }; - point_mul_g(tmp, s); - Hacl_Impl_Ed25519_PointCompress_point_compress(out, tmp); -} - -static inline void secret_expand(uint8_t *expanded, uint8_t *secret) -{ - uint8_t *h_low; - uint8_t h_low0; - uint8_t h_low31; - Hacl_Hash_SHA2_hash_512(secret, (uint32_t)32U, expanded); - h_low = expanded; - h_low0 = h_low[0U]; - h_low31 = h_low[31U]; - h_low[0U] = h_low0 & (uint8_t)0xf8U; - h_low[31U] = (h_low31 & (uint8_t)127U) | (uint8_t)64U; -} - -/******************************************************************************** - Verified C library for EdDSA signing and verification on the edwards25519 curve. -********************************************************************************/ - - -/** -Compute the public key from the private key. - - The outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. -*/ -void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key) -{ - uint8_t expanded_secret[64U] = { 0U }; - uint8_t *a; - secret_expand(expanded_secret, private_key); - a = expanded_secret; - point_mul_g_compress(public_key, a); -} - -/** -Compute the expanded keys for an Ed25519 signature. - - The outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - - If one needs to sign several messages under the same private key, it is more efficient - to call `expand_keys` only once and `sign_expanded` multiple times, for each message. -*/ -void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key) -{ - uint8_t *public_key = expanded_keys; - uint8_t *s_prefix = expanded_keys + (uint32_t)32U; - uint8_t *s = expanded_keys + (uint32_t)32U; - secret_expand(s_prefix, private_key); - point_mul_g_compress(public_key, s); -} - -/** -Create an Ed25519 signature with the (precomputed) expanded keys. - - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - - The argument `expanded_keys` is obtained through `expand_keys`. - - If one needs to sign several messages under the same private key, it is more efficient - to call `expand_keys` only once and `sign_expanded` multiple times, for each message. 
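Annotation: a caller-side sketch of the amortization this comment describes, expand once and sign many times; the helper name and buffer layout are mine, not part of the API:

    #include <stdint.h>
    #include <string.h>
    #include "Hacl_Ed25519.h"

    void sign_many(uint8_t sigs[][64], uint8_t *private_key,
                   uint8_t **msgs, uint32_t *msg_lens, uint32_t n)
    {
      uint8_t expanded_keys[96U] = { 0U };
      uint32_t i;
      Hacl_Ed25519_expand_keys(expanded_keys, private_key);  /* once */
      for (i = (uint32_t)0U; i < n; i++)                     /* per message */
      {
        Hacl_Ed25519_sign_expanded(sigs[i], expanded_keys, msg_lens[i], msgs[i]);
      }
      /* expanded_keys holds secret material; a real caller should scrub it
         with a non-elidable memzero rather than plain memset */
      memset(expanded_keys, 0, sizeof expanded_keys);
    }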
-*/ -void -Hacl_Ed25519_sign_expanded( - uint8_t *signature, - uint8_t *expanded_keys, - uint32_t msg_len, - uint8_t *msg -) -{ - uint8_t *rs = signature; - uint8_t *ss = signature + (uint32_t)32U; - uint64_t rq[5U] = { 0U }; - uint64_t hq[5U] = { 0U }; - uint8_t rb[32U] = { 0U }; - uint8_t *public_key = expanded_keys; - uint8_t *s = expanded_keys + (uint32_t)32U; - uint8_t *prefix = expanded_keys + (uint32_t)64U; - sha512_modq_pre(rq, prefix, msg_len, msg); - store_56(rb, rq); - point_mul_g_compress(rs, rb); - sha512_modq_pre_pre2(hq, rs, public_key, msg_len, msg); - { - uint64_t aq[5U] = { 0U }; - load_32_bytes(aq, s); - mul_modq(aq, hq, aq); - add_modq(aq, rq, aq); - store_56(ss, aq); - } -} - -/** -Create an Ed25519 signature. - - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - - The function first calls `expand_keys` and then invokes `sign_expanded`. - - If one needs to sign several messages under the same private key, it is more efficient - to call `expand_keys` only once and `sign_expanded` multiple times, for each message. -*/ -void -Hacl_Ed25519_sign(uint8_t *signature, uint8_t *private_key, uint32_t msg_len, uint8_t *msg) -{ - uint8_t expanded_keys[96U] = { 0U }; - Hacl_Ed25519_expand_keys(expanded_keys, private_key); - Hacl_Ed25519_sign_expanded(signature, expanded_keys, msg_len, msg); -} - -/** -Verify an Ed25519 signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. 
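Annotation: the four public entry points compose into a sign/verify round trip; a minimal sketch (the driver function and test message are mine):

    #include <stdint.h>
    #include "Hacl_Ed25519.h"

    int round_trip(uint8_t *private_key) /* private_key: 32 bytes */
    {
      uint8_t public_key[32U] = { 0U };
      uint8_t signature[64U] = { 0U };
      uint8_t msg[4U] = { 0xde, 0xad, 0xbe, 0xef };
      Hacl_Ed25519_secret_to_public(public_key, private_key);
      Hacl_Ed25519_sign(signature, private_key, (uint32_t)4U, msg);
      return Hacl_Ed25519_verify(public_key, (uint32_t)4U, msg, signature) ? 1 : 0;
    }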
-*/ -bool -Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t *signature) -{ - uint64_t a_[20U] = { 0U }; - bool b = Hacl_Impl_Ed25519_PointDecompress_point_decompress(a_, public_key); - if (b) - { - uint64_t r_[20U] = { 0U }; - uint8_t *rs = signature; - bool b_ = Hacl_Impl_Ed25519_PointDecompress_point_decompress(r_, rs); - if (b_) - { - uint8_t hb[32U] = { 0U }; - uint8_t *rs1 = signature; - uint8_t *sb = signature + (uint32_t)32U; - uint64_t tmp[5U] = { 0U }; - load_32_bytes(tmp, sb); - { - bool b1 = gte_q(tmp); - bool b10 = b1; - if (b10) - { - return false; - } - { - uint64_t tmp0[5U] = { 0U }; - sha512_modq_pre_pre2(tmp0, rs1, public_key, msg_len, msg); - store_56(hb, tmp0); - { - uint64_t exp_d[20U] = { 0U }; - point_negate_mul_double_g_vartime(exp_d, sb, hb, a_); - { - bool b2 = Hacl_Impl_Ed25519_PointEqual_point_equal(exp_d, r_); - return b2; - } - } - } - } - } - return false; - } - return false; -} - diff --git a/dist/c89-compatible/Hacl_Ed25519.h b/dist/c89-compatible/Hacl_Ed25519.h deleted file mode 100644 index 13d630ce1d..0000000000 --- a/dist/c89-compatible/Hacl_Ed25519.h +++ /dev/null @@ -1,119 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Ed25519_H -#define __Hacl_Ed25519_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Streaming_SHA2.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Hash_SHA2.h" -#include "Hacl_Bignum25519_51.h" -#include "evercrypt_targetconfig.h" -/******************************************************************************** - Verified C library for EdDSA signing and verification on the edwards25519 curve. -********************************************************************************/ - - -/** -Compute the public key from the private key. - - The outparam `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. -*/ -void Hacl_Ed25519_secret_to_public(uint8_t *public_key, uint8_t *private_key); - -/** -Compute the expanded keys for an Ed25519 signature. - - The outparam `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. 
- - If one needs to sign several messages under the same private key, it is more efficient - to call `expand_keys` only once and `sign_expanded` multiple times, for each message. -*/ -void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key); - -/** -Create an Ed25519 signature with the (precomputed) expanded keys. - - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `expanded_keys` points to 96 bytes of valid memory, i.e., uint8_t[96]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - - The argument `expanded_keys` is obtained through `expand_keys`. - - If one needs to sign several messages under the same private key, it is more efficient - to call `expand_keys` only once and `sign_expanded` multiple times, for each message. -*/ -void -Hacl_Ed25519_sign_expanded( - uint8_t *signature, - uint8_t *expanded_keys, - uint32_t msg_len, - uint8_t *msg -); - -/** -Create an Ed25519 signature. - - The outparam `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `private_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - - The function first calls `expand_keys` and then invokes `sign_expanded`. - - If one needs to sign several messages under the same private key, it is more efficient - to call `expand_keys` only once and `sign_expanded` multiple times, for each message. -*/ -void -Hacl_Ed25519_sign(uint8_t *signature, uint8_t *private_key, uint32_t msg_len, uint8_t *msg); - -/** -Verify an Ed25519 signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `public_key` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The argument `signature` points to 64 bytes of valid memory, i.e., uint8_t[64]. -*/ -bool -Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t *signature); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Ed25519_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_FFDHE.c b/dist/c89-compatible/Hacl_FFDHE.c deleted file mode 100644 index 72a2282402..0000000000 --- a/dist/c89-compatible/Hacl_FFDHE.c +++ /dev/null @@ -1,395 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_FFDHE.h" - -#include "internal/Hacl_Bignum.h" - -static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a) -{ - switch (a) - { - case Spec_FFDHE_FFDHE2048: - { - return (uint32_t)256U; - } - case Spec_FFDHE_FFDHE3072: - { - return (uint32_t)384U; - } - case Spec_FFDHE_FFDHE4096: - { - return (uint32_t)512U; - } - case Spec_FFDHE_FFDHE6144: - { - return (uint32_t)768U; - } - case Spec_FFDHE_FFDHE8192: - { - return (uint32_t)1024U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n) -{ - uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint64_t *p_n = p_r2_n; - uint64_t *r2_n = p_r2_n + nLen; - KRML_CHECK_SIZE(sizeof (uint8_t), ffdhe_len(a)); - { - uint8_t p_s[ffdhe_len(a)]; - memset(p_s, 0U, ffdhe_len(a) * sizeof (uint8_t)); - { - const uint8_t *p; - switch (a) - { - case Spec_FFDHE_FFDHE2048: - { - p = Hacl_Impl_FFDHE_Constants_ffdhe_p2048; - break; - } - case Spec_FFDHE_FFDHE3072: - { - p = Hacl_Impl_FFDHE_Constants_ffdhe_p3072; - break; - } - case Spec_FFDHE_FFDHE4096: - { - p = Hacl_Impl_FFDHE_Constants_ffdhe_p4096; - break; - } - case Spec_FFDHE_FFDHE6144: - { - p = Hacl_Impl_FFDHE_Constants_ffdhe_p6144; - break; - } - case Spec_FFDHE_FFDHE8192: - { - p = Hacl_Impl_FFDHE_Constants_ffdhe_p8192; - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - { - uint32_t len = ffdhe_len(a); - { - uint32_t i; - for (i = (uint32_t)0U; i < len; i++) - { - uint8_t *os = p_s; - uint8_t x = p[i]; - os[i] = x; - } - } - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ffdhe_len(a), p_s, p_n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - (uint32_t)1U) - / (uint32_t)8U - + (uint32_t)1U, - (uint32_t)8U * ffdhe_len(a) - (uint32_t)1U, - p_n, - r2_n); - } - } - } -} - -static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, uint64_t *p_n) -{ - uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t p_n1[nLen]; - memset(p_n1, 0U, nLen * sizeof (uint64_t)); - { - uint64_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, p_n[0U], (uint64_t)1U, p_n1); - uint64_t c1; - if ((uint32_t)1U < nLen) - { - uint64_t *a1 = p_n + (uint32_t)1U; - uint64_t *res1 = p_n1 + (uint32_t)1U; - uint64_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (nLen - (uint32_t)1U) / (uint32_t)4U; i++) - { - uint64_t t1 = a1[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0); - { - uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1); - { - uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2); - { - uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + 
(uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (nLen - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < nLen - (uint32_t)1U; - i++) - { - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i); - } - } - { - uint64_t c10 = c; - c1 = c10; - } - } - else - { - c1 = c0; - } - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t b2[nLen]; - memset(b2, 0U, nLen * sizeof (uint64_t)); - { - uint32_t i = (uint32_t)0U; - uint32_t j = (uint32_t)0U; - b2[i] = b2[i] | (uint64_t)1U << j; - { - uint64_t acc0 = (uint64_t)0U; - uint64_t res; - uint64_t m0; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < nLen; i0++) - { - uint64_t beq = FStar_UInt64_eq_mask(b2[i0], pk_n[i0]); - uint64_t blt = ~FStar_UInt64_gte_mask(b2[i0], pk_n[i0]); - acc0 = - (beq & acc0) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - res = acc0; - m0 = res; - { - uint64_t acc = (uint64_t)0U; - uint64_t m1; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < nLen; i0++) - { - uint64_t beq = FStar_UInt64_eq_mask(pk_n[i0], p_n1[i0]); - uint64_t blt = ~FStar_UInt64_gte_mask(pk_n[i0], p_n1[i0]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - m1 = acc; - return m0 & m1; - } - } - } - } - } - } -} - -static inline void -ffdhe_compute_exp( - Spec_FFDHE_ffdhe_alg a, - uint64_t *p_r2_n, - uint64_t *sk_n, - uint64_t *b_n, - uint8_t *res -) -{ - uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint64_t *p_n = p_r2_n; - uint64_t *r2_n = p_r2_n + nLen; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t res_n[nLen]; - memset(res_n, 0U, nLen * sizeof (uint64_t)); - { - uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(p_n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - (uint32_t)1U) - / (uint32_t)8U - + (uint32_t)1U, - p_n, - mu, - r2_n, - b_n, - (uint32_t)64U * nLen, - sk_n, - res_n); - Hacl_Bignum_Convert_bn_to_bytes_be_uint64(ffdhe_len(a), res_n, res); - } - } -} - -uint32_t Hacl_FFDHE_ffdhe_len(Spec_FFDHE_ffdhe_alg a) -{ - return ffdhe_len(a); -} - -uint64_t *Hacl_FFDHE_new_ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a) -{ - uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen); - { - uint64_t *res = (uint64_t *)KRML_HOST_CALLOC(nLen + nLen, sizeof (uint64_t)); - if (res == NULL) - { - return res; - } - { - uint64_t *res1 = res; - uint64_t *res2 = res1; - ffdhe_precomp_p(a, res2); - return res2; - } - } -} - -void -Hacl_FFDHE_ffdhe_secret_to_public_precomp( - Spec_FFDHE_ffdhe_alg a, - uint64_t *p_r2_n, - uint8_t *sk, - uint8_t *pk -) -{ - uint32_t len = ffdhe_len(a); - uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t g_n[nLen]; - memset(g_n, 0U, nLen * sizeof (uint64_t)); - { - uint8_t g = (uint8_t)0U; - { - uint8_t *os = &g; - uint8_t x = Hacl_Impl_FFDHE_Constants_ffdhe_g2[0U]; - os[0U] = x; - } - Hacl_Bignum_Convert_bn_from_bytes_be_uint64((uint32_t)1U, &g, g_n); - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t sk_n[nLen]; - memset(sk_n, 0U, nLen * sizeof (uint64_t)); - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, sk, sk_n); - ffdhe_compute_exp(a, p_r2_n, sk_n, g_n, pk); - } - } - } -} - -void 
Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk) -{ - uint32_t len = ffdhe_len(a); - uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen); - { - uint64_t p_r2_n[nLen + nLen]; - memset(p_r2_n, 0U, (nLen + nLen) * sizeof (uint64_t)); - ffdhe_precomp_p(a, p_r2_n); - Hacl_FFDHE_ffdhe_secret_to_public_precomp(a, p_r2_n, sk, pk); - } -} - -uint64_t -Hacl_FFDHE_ffdhe_shared_secret_precomp( - Spec_FFDHE_ffdhe_alg a, - uint64_t *p_r2_n, - uint8_t *sk, - uint8_t *pk, - uint8_t *ss -) -{ - uint32_t len = ffdhe_len(a); - uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint64_t *p_n = p_r2_n; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t sk_n[nLen]; - memset(sk_n, 0U, nLen * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t pk_n[nLen]; - memset(pk_n, 0U, nLen * sizeof (uint64_t)); - { - uint64_t m; - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, sk, sk_n); - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, pk, pk_n); - m = ffdhe_check_pk(a, pk_n, p_n); - if (m == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - ffdhe_compute_exp(a, p_r2_n, sk_n, pk_n, ss); - } - return m; - } - } - } -} - -uint64_t -Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk, uint8_t *ss) -{ - uint32_t len = ffdhe_len(a); - uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen); - { - uint64_t p_n[nLen + nLen]; - memset(p_n, 0U, (nLen + nLen) * sizeof (uint64_t)); - { - uint64_t m; - ffdhe_precomp_p(a, p_n); - m = Hacl_FFDHE_ffdhe_shared_secret_precomp(a, p_n, sk, pk, ss); - return m; - } - } -} - diff --git a/dist/c89-compatible/Hacl_FFDHE.h b/dist/c89-compatible/Hacl_FFDHE.h deleted file mode 100644 index b4c0a4ddfb..0000000000 --- a/dist/c89-compatible/Hacl_FFDHE.h +++ /dev/null @@ -1,73 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
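Annotation: the Hacl_FFDHE entry points deleted above compose into a complete exchange; a sketch for FFDHE-2048, where ffdhe_len gives 256-byte keys and secrets. The driver name is mine, the caller must fill sk_a and sk_b with fresh random bytes, and the returned masks are all-ones only if the peer's public key passed the 1 < pk < p - 1 validation in ffdhe_check_pk:

    #include <stdint.h>
    #include "Hacl_FFDHE.h"

    int exchange(uint8_t sk_a[256], uint8_t sk_b[256])
    {
      uint8_t pk_a[256U] = { 0U };
      uint8_t pk_b[256U] = { 0U };
      uint8_t ss_a[256U] = { 0U };
      uint8_t ss_b[256U] = { 0U };
      uint64_t m_a;
      uint64_t m_b;
      Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_FFDHE2048, sk_a, pk_a);
      Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_FFDHE2048, sk_b, pk_b);
      m_a = Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_FFDHE2048, sk_a, pk_b, ss_a);
      m_b = Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_FFDHE2048, sk_b, pk_a, ss_b);
      /* on success both sides hold the same big-endian 256-byte secret */
      return m_a == (uint64_t)0xFFFFFFFFFFFFFFFFU
          && m_b == (uint64_t)0xFFFFFFFFFFFFFFFFU;
    }

When either mask is zero the corresponding ss buffer is left untouched, since ffdhe_shared_secret_precomp only exponentiates after the validation mask comes back all-ones.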
- */ - - -#ifndef __Hacl_FFDHE_H -#define __Hacl_FFDHE_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Spec.h" -#include "Hacl_Impl_FFDHE_Constants.h" -#include "Hacl_Bignum_Base.h" -#include "evercrypt_targetconfig.h" -uint32_t Hacl_FFDHE_ffdhe_len(Spec_FFDHE_ffdhe_alg a); - -uint64_t *Hacl_FFDHE_new_ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a); - -void -Hacl_FFDHE_ffdhe_secret_to_public_precomp( - Spec_FFDHE_ffdhe_alg a, - uint64_t *p_r2_n, - uint8_t *sk, - uint8_t *pk -); - -void Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk); - -uint64_t -Hacl_FFDHE_ffdhe_shared_secret_precomp( - Spec_FFDHE_ffdhe_alg a, - uint64_t *p_r2_n, - uint8_t *sk, - uint8_t *pk, - uint8_t *ss -); - -uint64_t -Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk, uint8_t *ss); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_FFDHE_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Frodo1344.c b/dist/c89-compatible/Hacl_Frodo1344.c deleted file mode 100644 index 818d9a8979..0000000000 --- a/dist/c89-compatible/Hacl_Frodo1344.c +++ /dev/null @@ -1,386 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
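Annotation: the file whose deletion begins here implements FrodoKEM-1344, the largest (NIST Level 5) parameter set: 21520-byte public keys, 43088-byte secret keys, 21632-byte ciphertexts and 32-byte shared secrets, as the constants below record. Key generation is plain matrix LWE over Z_2^16:

    B = A * S + E,   A in Z^(1344 x 1344),   S, E in Z^(1344 x 8),

with A never stored but expanded from a 16-byte seed via SHAKE128 each time it is needed; that is what the 1806336-element a_matrix stack buffers below are, i.e. 1344 * 1344 entries.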
- */ - - -#include "Hacl_Frodo1344.h" - -#include "internal/Hacl_Frodo_KEM.h" - -uint32_t Hacl_Frodo1344_crypto_bytes = (uint32_t)32U; - -uint32_t Hacl_Frodo1344_crypto_publickeybytes = (uint32_t)21520U; - -uint32_t Hacl_Frodo1344_crypto_secretkeybytes = (uint32_t)43088U; - -uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = (uint32_t)21632U; - -uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) -{ - uint8_t coins[80U] = { 0U }; - uint8_t *s; - uint8_t *seed_se; - uint8_t *z; - uint8_t *seed_a; - uint8_t *b_bytes; - uint8_t *s_bytes; - randombytes_((uint32_t)80U, coins); - s = coins; - seed_se = coins + (uint32_t)32U; - z = coins + (uint32_t)64U; - seed_a = pk; - Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a); - b_bytes = pk + (uint32_t)16U; - s_bytes = sk + (uint32_t)21552U; - { - uint16_t s_matrix[10752U] = { 0U }; - uint16_t e_matrix[10752U] = { 0U }; - uint8_t r[43008U] = { 0U }; - uint8_t shake_input_seed_se[33U] = { 0U }; - shake_input_seed_se[0U] = (uint8_t)0x5fU; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43008U, r); - Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U, (uint32_t)8U, r, s_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U, - (uint32_t)8U, - r + (uint32_t)21504U, - e_matrix); - { - uint16_t b_matrix[10752U] = { 0U }; - uint16_t a_matrix[1806336U] = { 0U }; - uint32_t slen1; - uint8_t *sk_p; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)1344U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul_s((uint32_t)1344U, - (uint32_t)1344U, - (uint32_t)8U, - a_matrix, - s_matrix, - b_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)1344U, (uint32_t)8U, b_matrix, e_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)1344U, - (uint32_t)8U, - (uint32_t)16U, - b_matrix, - b_bytes); - Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)1344U, (uint32_t)8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(e_matrix, (uint32_t)10752U * sizeof (e_matrix[0U])); - slen1 = (uint32_t)43056U; - sk_p = sk; - memcpy(sk_p, s, (uint32_t)32U * sizeof (uint8_t)); - memcpy(sk_p + (uint32_t)32U, pk, (uint32_t)21520U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, sk + slen1); - Lib_Memzero0_memzero(coins, (uint32_t)80U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } -} - -uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) -{ - uint8_t coins[32U] = { 0U }; - randombytes_((uint32_t)32U, coins); - { - uint8_t seed_se_k[64U] = { 0U }; - uint8_t pkh_mu[64U] = { 0U }; - uint8_t *seed_se; - uint8_t *k; - Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, pkh_mu); - memcpy(pkh_mu + (uint32_t)32U, coins, (uint32_t)32U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)64U, pkh_mu, (uint32_t)64U, seed_se_k); - seed_se = seed_se_k; - k = seed_se_k + (uint32_t)32U; - { - uint8_t *seed_a = pk; - uint8_t *b = pk + (uint32_t)16U; - uint16_t sp_matrix[10752U] = { 0U }; - uint16_t ep_matrix[10752U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[43136U] = { 0U }; - uint8_t shake_input_seed_se[33U] = { 0U }; - uint8_t *c1; - uint8_t *c2; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, 
(uint32_t)32U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r); - Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, - (uint32_t)1344U, - r + (uint32_t)21504U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)43008U, - epp_matrix); - c1 = ct; - c2 = ct + (uint32_t)21504U; - { - uint16_t bp_matrix[10752U] = { 0U }; - uint16_t a_matrix[1806336U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)1344U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)1344U, - (uint32_t)1344U, - sp_matrix, - a_matrix, - bp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bp_matrix, ep_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, - (uint32_t)1344U, - (uint32_t)16U, - bp_matrix, - c1); - { - uint16_t v_matrix[64U] = { 0U }; - uint16_t b_matrix[10752U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, - (uint32_t)8U, - (uint32_t)16U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)1344U, - (uint32_t)8U, - sp_matrix, - b_matrix, - v_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U, - (uint32_t)4U, - (uint32_t)8U, - coins, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, - (uint32_t)8U, - (uint32_t)16U, - v_matrix, - c2); - Lib_Memzero0_memzero(v_matrix, (uint32_t)64U * sizeof (v_matrix[0U])); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U * sizeof (ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof (epp_matrix[0U])); - { - uint32_t ss_init_len = (uint32_t)21664U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t shake_input_ss[ss_init_len]; - memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(shake_input_ss, ct, (uint32_t)21632U * sizeof (uint8_t)); - memcpy(shake_input_ss + (uint32_t)21632U, k, (uint32_t)32U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)32U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len * sizeof (shake_input_ss[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(coins, (uint32_t)32U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } -} - -uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) -{ - uint16_t bp_matrix[10752U] = { 0U }; - uint16_t c_matrix[64U] = { 0U }; - uint8_t *c1 = ct; - uint8_t *c2 = ct + (uint32_t)21504U; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, c1, bp_matrix); - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix); - { - uint8_t mu_decode[32U] = { 0U }; - uint8_t *s_bytes = sk + (uint32_t)21552U; - uint16_t s_matrix[10752U] = { 0U }; - uint16_t m_matrix[64U] = { 0U }; - Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)1344U, (uint32_t)8U, s_bytes, s_matrix); - 
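/* Annotation (not in the diff): from this point crypto_kem_dec is the
   implicit-rejection Fujisaki-Okamoto decapsulation: decode a candidate
   mu' from C - B'S, re-derive (seed_se', k') by hashing pkh || mu',
   re-encrypt deterministically, compare the recomputed matrices against
   the ciphertext with the constant-time Hacl_Impl_Matrix_matrix_eq, and
   finally hash ct together with either k' (on match) or the stored secret
   s (on mismatch), the choice made by mask rather than by a branch on
   secret data. */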
Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U, - (uint32_t)1344U, - (uint32_t)8U, - bp_matrix, - s_matrix, - m_matrix); - Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix); - Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U, - (uint32_t)4U, - (uint32_t)8U, - m_matrix, - mu_decode); - Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(m_matrix, (uint32_t)64U * sizeof (m_matrix[0U])); - { - uint8_t seed_se_k[64U] = { 0U }; - uint32_t pkh_mu_decode_len = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); - { - uint8_t pkh_mu_decode[pkh_mu_decode_len]; - memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t)); - { - uint8_t *pkh = sk + (uint32_t)43056U; - uint8_t *seed_se; - uint8_t *kp; - uint8_t *s; - memcpy(pkh_mu_decode, pkh, (uint32_t)32U * sizeof (uint8_t)); - memcpy(pkh_mu_decode + (uint32_t)32U, mu_decode, (uint32_t)32U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)64U, seed_se_k); - seed_se = seed_se_k; - kp = seed_se_k + (uint32_t)32U; - s = sk; - { - uint16_t bpp_matrix[10752U] = { 0U }; - uint16_t cp_matrix[64U] = { 0U }; - uint16_t sp_matrix[10752U] = { 0U }; - uint16_t ep_matrix[10752U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[43136U] = { 0U }; - uint8_t shake_input_seed_se[33U] = { 0U }; - uint8_t *pk; - uint8_t *seed_a; - uint8_t *b; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r); - Lib_Memzero0_memzero(shake_input_seed_se, - (uint32_t)33U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, - (uint32_t)1344U, - r, - sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, - (uint32_t)1344U, - r + (uint32_t)21504U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)43008U, - epp_matrix); - pk = sk + (uint32_t)32U; - seed_a = pk; - b = pk + (uint32_t)16U; - { - uint16_t a_matrix[1806336U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)1344U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)1344U, - (uint32_t)1344U, - sp_matrix, - a_matrix, - bpp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bpp_matrix, ep_matrix); - { - uint16_t b_matrix[10752U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, - (uint32_t)8U, - (uint32_t)16U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)1344U, - (uint32_t)8U, - sp_matrix, - b_matrix, - cp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - uint16_t b1; - uint16_t b2; - uint16_t mask0; - uint16_t mask; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U, - (uint32_t)4U, - (uint32_t)8U, - mu_decode, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, - (uint32_t)1344U, - (uint32_t)16U, - bpp_matrix); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U * sizeof 
(ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof (epp_matrix[0U])); - b1 = - Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, - (uint32_t)1344U, - bp_matrix, - bpp_matrix); - b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix); - mask0 = b1 & b2; - mask = mask0; - { - uint8_t kp_s[32U] = { 0U }; - uint32_t ss_init_len; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t *os = kp_s; - uint8_t uu____0 = s[i]; - uint8_t x = uu____0 ^ ((uint8_t)mask & (kp[i] ^ uu____0)); - os[i] = x; - } - } - ss_init_len = (uint32_t)21664U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t ss_init[ss_init_len]; - memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(ss_init, ct, (uint32_t)21632U * sizeof (uint8_t)); - memcpy(ss_init + (uint32_t)21632U, kp_s, (uint32_t)32U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)32U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len * sizeof (ss_init[0U])); - Lib_Memzero0_memzero(kp_s, (uint32_t)32U * sizeof (kp_s[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(mu_decode, (uint32_t)32U * sizeof (mu_decode[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_Frodo1344.h b/dist/c89-compatible/Hacl_Frodo1344.h deleted file mode 100644 index b88ca8406d..0000000000 --- a/dist/c89-compatible/Hacl_Frodo1344.h +++ /dev/null @@ -1,62 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Frodo1344_H -#define __Hacl_Frodo1344_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Spec.h" -#include "Hacl_SHA3.h" -#include "Hacl_Frodo_KEM.h" -#include "evercrypt_targetconfig.h" -extern uint32_t Hacl_Frodo1344_crypto_bytes; - -extern uint32_t Hacl_Frodo1344_crypto_publickeybytes; - -extern uint32_t Hacl_Frodo1344_crypto_secretkeybytes; - -extern uint32_t Hacl_Frodo1344_crypto_ciphertextbytes; - -uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk); - -uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk); - -uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Frodo1344_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Frodo64.c b/dist/c89-compatible/Hacl_Frodo64.c deleted file mode 100644 index 28f49f8e8c..0000000000 --- a/dist/c89-compatible/Hacl_Frodo64.c +++ /dev/null @@ -1,379 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Frodo64.h" - -#include "internal/Hacl_Frodo_KEM.h" - -/* - this variant is used only for testing purposes!
- */ - - -uint32_t Hacl_Frodo64_crypto_bytes = (uint32_t)16U; - -uint32_t Hacl_Frodo64_crypto_publickeybytes = (uint32_t)976U; - -uint32_t Hacl_Frodo64_crypto_secretkeybytes = (uint32_t)2032U; - -uint32_t Hacl_Frodo64_crypto_ciphertextbytes = (uint32_t)1080U; - -uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) -{ - uint8_t coins[48U] = { 0U }; - uint8_t *s; - uint8_t *seed_se; - uint8_t *z; - uint8_t *seed_a; - uint8_t *b_bytes; - uint8_t *s_bytes; - randombytes_((uint32_t)48U, coins); - s = coins; - seed_se = coins + (uint32_t)16U; - z = coins + (uint32_t)32U; - seed_a = pk; - Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a); - b_bytes = pk + (uint32_t)16U; - s_bytes = sk + (uint32_t)992U; - { - uint16_t s_matrix[512U] = { 0U }; - uint16_t e_matrix[512U] = { 0U }; - uint8_t r[2048U] = { 0U }; - uint8_t shake_input_seed_se[17U] = { 0U }; - shake_input_seed_se[0U] = (uint8_t)0x5fU; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2048U, r); - Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U, (uint32_t)8U, r, s_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U, - (uint32_t)8U, - r + (uint32_t)1024U, - e_matrix); - { - uint16_t b_matrix[512U] = { 0U }; - uint16_t a_matrix[4096U] = { 0U }; - uint32_t slen1; - uint8_t *sk_p; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)64U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul_s((uint32_t)64U, - (uint32_t)64U, - (uint32_t)8U, - a_matrix, - s_matrix, - b_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)64U, (uint32_t)8U, b_matrix, e_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)64U, - (uint32_t)8U, - (uint32_t)15U, - b_matrix, - b_bytes); - Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)64U, (uint32_t)8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, (uint32_t)512U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(e_matrix, (uint32_t)512U * sizeof (e_matrix[0U])); - slen1 = (uint32_t)2016U; - sk_p = sk; - memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t)); - memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)976U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, sk + slen1); - Lib_Memzero0_memzero(coins, (uint32_t)48U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } -} - -uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) -{ - uint8_t coins[16U] = { 0U }; - randombytes_((uint32_t)16U, coins); - { - uint8_t seed_se_k[32U] = { 0U }; - uint8_t pkh_mu[32U] = { 0U }; - uint8_t *seed_se; - uint8_t *k; - Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, pkh_mu); - memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k); - seed_se = seed_se_k; - k = seed_se_k + (uint32_t)16U; - { - uint8_t *seed_a = pk; - uint8_t *b = pk + (uint32_t)16U; - uint16_t sp_matrix[512U] = { 0U }; - uint16_t ep_matrix[512U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[2176U] = { 0U }; - uint8_t shake_input_seed_se[17U] = { 0U }; - uint8_t *c1; - uint8_t *c2; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r); - 
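Note the one-byte domain separator prepended to seed_se before every SHAKE expansion in these files: 0x5f in crypto_kem_keypair and 0x96 in crypto_kem_enc/crypto_kem_dec, so the key-generation and encapsulation PRF streams derived from the same seed can never collide. A minimal sketch of the construction; prf_input is a hypothetical helper name:

#include <stdint.h>
#include <string.h>

/* Build the SHAKE PRF input: tag byte (0x5f for keygen, 0x96 for
   encaps/decaps, as in the code above) followed by seed_se. */
static void prf_input(uint8_t tag, uint8_t *seed_se, uint32_t seed_len, uint8_t *out)
{
  out[0U] = tag;
  memcpy(out + (uint32_t)1U, seed_se, seed_len * sizeof (uint8_t));
}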
Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, - (uint32_t)64U, - r + (uint32_t)1024U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)2048U, - epp_matrix); - c1 = ct; - c2 = ct + (uint32_t)960U; - { - uint16_t bp_matrix[512U] = { 0U }; - uint16_t a_matrix[4096U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)64U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)64U, - (uint32_t)64U, - sp_matrix, - a_matrix, - bp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bp_matrix, ep_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bp_matrix, c1); - { - uint16_t v_matrix[64U] = { 0U }; - uint16_t b_matrix[512U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, - (uint32_t)8U, - (uint32_t)15U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)64U, - (uint32_t)8U, - sp_matrix, - b_matrix, - v_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U, - (uint32_t)2U, - (uint32_t)8U, - coins, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, - (uint32_t)8U, - (uint32_t)15U, - v_matrix, - c2); - Lib_Memzero0_memzero(v_matrix, (uint32_t)64U * sizeof (v_matrix[0U])); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U * sizeof (ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof (epp_matrix[0U])); - { - uint32_t ss_init_len = (uint32_t)1096U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t shake_input_ss[ss_init_len]; - memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(shake_input_ss, ct, (uint32_t)1080U * sizeof (uint8_t)); - memcpy(shake_input_ss + (uint32_t)1080U, k, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len * sizeof (shake_input_ss[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(coins, (uint32_t)16U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } -} - -uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) -{ - uint16_t bp_matrix[512U] = { 0U }; - uint16_t c_matrix[64U] = { 0U }; - uint8_t *c1 = ct; - uint8_t *c2 = ct + (uint32_t)960U; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, c1, bp_matrix); - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix); - { - uint8_t mu_decode[16U] = { 0U }; - uint8_t *s_bytes = sk + (uint32_t)992U; - uint16_t s_matrix[512U] = { 0U }; - uint16_t m_matrix[64U] = { 0U }; - Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)64U, (uint32_t)8U, s_bytes, s_matrix); - Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U, - (uint32_t)64U, - (uint32_t)8U, - bp_matrix, - s_matrix, - m_matrix); - Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix); 
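Each crypto_kem_dec in these files ends with a branch-free selection between the re-derived key kp and the random fallback secret s, driven by the all-ones/all-zero mask produced by Hacl_Impl_Matrix_matrix_eq on the re-encrypted ciphertext. A minimal sketch of that select; ct_select is a hypothetical name:

#include <stdint.h>

/* x = s ^ (mask & (kp ^ s)) yields kp when mask == 0xFF (ciphertext check
   passed) and s when mask == 0, with no secret-dependent branch. */
static void ct_select(uint8_t *dst, uint8_t *kp, uint8_t *s, uint8_t mask, uint32_t len)
{
  uint32_t i;
  for (i = (uint32_t)0U; i < len; i++)
  {
    dst[i] = s[i] ^ (mask & (kp[i] ^ s[i]));
  }
}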
- Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U, - (uint32_t)2U, - (uint32_t)8U, - m_matrix, - mu_decode); - Lib_Memzero0_memzero(s_matrix, (uint32_t)512U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(m_matrix, (uint32_t)64U * sizeof (m_matrix[0U])); - { - uint8_t seed_se_k[32U] = { 0U }; - uint32_t pkh_mu_decode_len = (uint32_t)32U; - KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); - { - uint8_t pkh_mu_decode[pkh_mu_decode_len]; - memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t)); - { - uint8_t *pkh = sk + (uint32_t)2016U; - uint8_t *seed_se; - uint8_t *kp; - uint8_t *s; - memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t)); - memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k); - seed_se = seed_se_k; - kp = seed_se_k + (uint32_t)16U; - s = sk; - { - uint16_t bpp_matrix[512U] = { 0U }; - uint16_t cp_matrix[64U] = { 0U }; - uint16_t sp_matrix[512U] = { 0U }; - uint16_t ep_matrix[512U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[2176U] = { 0U }; - uint8_t shake_input_seed_se[17U] = { 0U }; - uint8_t *pk; - uint8_t *seed_a; - uint8_t *b; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r); - Lib_Memzero0_memzero(shake_input_seed_se, - (uint32_t)17U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, - (uint32_t)64U, - r + (uint32_t)1024U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)2048U, - epp_matrix); - pk = sk + (uint32_t)16U; - seed_a = pk; - b = pk + (uint32_t)16U; - { - uint16_t a_matrix[4096U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)64U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)64U, - (uint32_t)64U, - sp_matrix, - a_matrix, - bpp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bpp_matrix, ep_matrix); - { - uint16_t b_matrix[512U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, - (uint32_t)8U, - (uint32_t)15U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)64U, - (uint32_t)8U, - sp_matrix, - b_matrix, - cp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - uint16_t b1; - uint16_t b2; - uint16_t mask0; - uint16_t mask; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U, - (uint32_t)2U, - (uint32_t)8U, - mu_decode, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bpp_matrix); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U * sizeof (ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof (epp_matrix[0U])); - b1 = - Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, - (uint32_t)64U, - bp_matrix, - bpp_matrix); - b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, 
c_matrix, cp_matrix); - mask0 = b1 & b2; - mask = mask0; - { - uint8_t kp_s[16U] = { 0U }; - uint32_t ss_init_len; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint8_t *os = kp_s; - uint8_t uu____0 = s[i]; - uint8_t x = uu____0 ^ ((uint8_t)mask & (kp[i] ^ uu____0)); - os[i] = x;); - ss_init_len = (uint32_t)1096U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t ss_init[ss_init_len]; - memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(ss_init, ct, (uint32_t)1080U * sizeof (uint8_t)); - memcpy(ss_init + (uint32_t)1080U, kp_s, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len * sizeof (ss_init[0U])); - Lib_Memzero0_memzero(kp_s, (uint32_t)16U * sizeof (kp_s[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(mu_decode, (uint32_t)16U * sizeof (mu_decode[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_Frodo64.h b/dist/c89-compatible/Hacl_Frodo64.h deleted file mode 100644 index 0c6a1fce0e..0000000000 --- a/dist/c89-compatible/Hacl_Frodo64.h +++ /dev/null @@ -1,67 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Frodo64_H -#define __Hacl_Frodo64_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Spec.h" -#include "Hacl_SHA3.h" -#include "Hacl_Frodo_KEM.h" -#include "evercrypt_targetconfig.h" -/* - this variant is used only for testing purposes!
- */ - - -extern uint32_t Hacl_Frodo64_crypto_bytes; - -extern uint32_t Hacl_Frodo64_crypto_publickeybytes; - -extern uint32_t Hacl_Frodo64_crypto_secretkeybytes; - -extern uint32_t Hacl_Frodo64_crypto_ciphertextbytes; - -uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk); - -uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk); - -uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Frodo64_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Frodo640.c b/dist/c89-compatible/Hacl_Frodo640.c deleted file mode 100644 index 32ea78fcf7..0000000000 --- a/dist/c89-compatible/Hacl_Frodo640.c +++ /dev/null @@ -1,380 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "Hacl_Frodo640.h" - -#include "internal/Hacl_Frodo_KEM.h" - -uint32_t Hacl_Frodo640_crypto_bytes = (uint32_t)16U; - -uint32_t Hacl_Frodo640_crypto_publickeybytes = (uint32_t)9616U; - -uint32_t Hacl_Frodo640_crypto_secretkeybytes = (uint32_t)19888U; - -uint32_t Hacl_Frodo640_crypto_ciphertextbytes = (uint32_t)9720U; - -uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) -{ - uint8_t coins[48U] = { 0U }; - uint8_t *s; - uint8_t *seed_se; - uint8_t *z; - uint8_t *seed_a; - uint8_t *b_bytes; - uint8_t *s_bytes; - randombytes_((uint32_t)48U, coins); - s = coins; - seed_se = coins + (uint32_t)16U; - z = coins + (uint32_t)32U; - seed_a = pk; - Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a); - b_bytes = pk + (uint32_t)16U; - s_bytes = sk + (uint32_t)9632U; - { - uint16_t s_matrix[5120U] = { 0U }; - uint16_t e_matrix[5120U] = { 0U }; - uint8_t r[20480U] = { 0U }; - uint8_t shake_input_seed_se[17U] = { 0U }; - shake_input_seed_se[0U] = (uint8_t)0x5fU; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20480U, r); - Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U, (uint32_t)8U, r, s_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U, - (uint32_t)8U, - r + (uint32_t)10240U, - e_matrix); - { - uint16_t b_matrix[5120U] = { 0U }; - uint16_t a_matrix[409600U] = { 0U }; - uint32_t slen1; - uint8_t *sk_p; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)640U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul_s((uint32_t)640U, - (uint32_t)640U, - (uint32_t)8U, - a_matrix, - s_matrix, - b_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)640U, (uint32_t)8U, b_matrix, e_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)640U, - (uint32_t)8U, - (uint32_t)15U, - b_matrix, - b_bytes); - Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)640U, (uint32_t)8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(e_matrix, (uint32_t)5120U * sizeof (e_matrix[0U])); - slen1 = (uint32_t)19872U; - sk_p = sk; - memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t)); - memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)9616U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, sk + slen1); - Lib_Memzero0_memzero(coins, (uint32_t)48U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } -} - -uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) -{ - uint8_t coins[16U] = { 0U }; - randombytes_((uint32_t)16U, coins); - { - uint8_t seed_se_k[32U] = { 0U }; - uint8_t pkh_mu[32U] = { 0U }; - uint8_t *seed_se; - uint8_t *k; - Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, pkh_mu); - memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k); - seed_se = seed_se_k; - k = seed_se_k + (uint32_t)16U; - { - uint8_t *seed_a = pk; - uint8_t *b = pk + (uint32_t)16U; - uint16_t sp_matrix[5120U] = { 0U }; - uint16_t ep_matrix[5120U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[20608U] = { 0U }; - uint8_t shake_input_seed_se[17U] = { 0U }; - uint8_t *c1; - uint8_t *c2; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t)); - 
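The 20608-byte buffer r filled by the next SHAKE call is consumed as three back-to-back sample areas: S' and E' are nbar x n = 8 x 640 matrices at two bytes per entry, followed by the 8 x 8 E''. The offsets 10240 and 20480 in the sampling calls below follow from that layout; the macro names here are hypothetical:

/* PRF output layout for Frodo-640 (n = 640, nbar = 8, 2 bytes per sample);
   total is 20480 + 2*8*8 = 20608 bytes, the declared size of r. */
#define FRODO640_SP_OFFSET 0U /* S' : 2*8*640 = 10240 bytes */
#define FRODO640_EP_OFFSET (2U * 8U * 640U) /* E' : starts at 10240 */
#define FRODO640_EPP_OFFSET (2U * 2U * 8U * 640U) /* E'' : starts at 20480 */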
Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r); - Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, - (uint32_t)640U, - r + (uint32_t)10240U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)20480U, - epp_matrix); - c1 = ct; - c2 = ct + (uint32_t)9600U; - { - uint16_t bp_matrix[5120U] = { 0U }; - uint16_t a_matrix[409600U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)640U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)640U, - (uint32_t)640U, - sp_matrix, - a_matrix, - bp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bp_matrix, ep_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bp_matrix, c1); - { - uint16_t v_matrix[64U] = { 0U }; - uint16_t b_matrix[5120U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, - (uint32_t)8U, - (uint32_t)15U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)640U, - (uint32_t)8U, - sp_matrix, - b_matrix, - v_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U, - (uint32_t)2U, - (uint32_t)8U, - coins, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, - (uint32_t)8U, - (uint32_t)15U, - v_matrix, - c2); - Lib_Memzero0_memzero(v_matrix, (uint32_t)64U * sizeof (v_matrix[0U])); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U * sizeof (ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof (epp_matrix[0U])); - { - uint32_t ss_init_len = (uint32_t)9736U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t shake_input_ss[ss_init_len]; - memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(shake_input_ss, ct, (uint32_t)9720U * sizeof (uint8_t)); - memcpy(shake_input_ss + (uint32_t)9720U, k, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len * sizeof (shake_input_ss[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(coins, (uint32_t)16U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } -} - -uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) -{ - uint16_t bp_matrix[5120U] = { 0U }; - uint16_t c_matrix[64U] = { 0U }; - uint8_t *c1 = ct; - uint8_t *c2 = ct + (uint32_t)9600U; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, c1, bp_matrix); - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix); - { - uint8_t mu_decode[16U] = { 0U }; - uint8_t *s_bytes = sk + (uint32_t)9632U; - uint16_t s_matrix[5120U] = { 0U }; - uint16_t m_matrix[64U] = { 0U }; - Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)640U, (uint32_t)8U, s_bytes, s_matrix); - Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U, - (uint32_t)640U, - (uint32_t)8U, - 
bp_matrix, - s_matrix, - m_matrix); - Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix); - Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U, - (uint32_t)2U, - (uint32_t)8U, - m_matrix, - mu_decode); - Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(m_matrix, (uint32_t)64U * sizeof (m_matrix[0U])); - { - uint8_t seed_se_k[32U] = { 0U }; - uint32_t pkh_mu_decode_len = (uint32_t)32U; - KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); - { - uint8_t pkh_mu_decode[pkh_mu_decode_len]; - memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t)); - { - uint8_t *pkh = sk + (uint32_t)19872U; - uint8_t *seed_se; - uint8_t *kp; - uint8_t *s; - memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t)); - memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k); - seed_se = seed_se_k; - kp = seed_se_k + (uint32_t)16U; - s = sk; - { - uint16_t bpp_matrix[5120U] = { 0U }; - uint16_t cp_matrix[64U] = { 0U }; - uint16_t sp_matrix[5120U] = { 0U }; - uint16_t ep_matrix[5120U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[20608U] = { 0U }; - uint8_t shake_input_seed_se[17U] = { 0U }; - uint8_t *pk; - uint8_t *seed_a; - uint8_t *b; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r); - Lib_Memzero0_memzero(shake_input_seed_se, - (uint32_t)17U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, - (uint32_t)640U, - r, - sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, - (uint32_t)640U, - r + (uint32_t)10240U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)20480U, - epp_matrix); - pk = sk + (uint32_t)16U; - seed_a = pk; - b = pk + (uint32_t)16U; - { - uint16_t a_matrix[409600U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)640U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)640U, - (uint32_t)640U, - sp_matrix, - a_matrix, - bpp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bpp_matrix, ep_matrix); - { - uint16_t b_matrix[5120U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, - (uint32_t)8U, - (uint32_t)15U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)640U, - (uint32_t)8U, - sp_matrix, - b_matrix, - cp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - uint16_t b1; - uint16_t b2; - uint16_t mask0; - uint16_t mask; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U, - (uint32_t)2U, - (uint32_t)8U, - mu_decode, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, - (uint32_t)640U, - (uint32_t)15U, - bpp_matrix); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U * sizeof (ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof (epp_matrix[0U])); - b1 = - 
Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, - (uint32_t)640U, - bp_matrix, - bpp_matrix); - b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix); - mask0 = b1 & b2; - mask = mask0; - { - uint8_t kp_s[16U] = { 0U }; - uint32_t ss_init_len; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint8_t *os = kp_s; - uint8_t uu____0 = s[i]; - uint8_t x = uu____0 ^ ((uint8_t)mask & (kp[i] ^ uu____0)); - os[i] = x;); - ss_init_len = (uint32_t)9736U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t ss_init[ss_init_len]; - memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(ss_init, ct, (uint32_t)9720U * sizeof (uint8_t)); - memcpy(ss_init + (uint32_t)9720U, kp_s, (uint32_t)16U * sizeof (uint8_t)); - Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len * sizeof (ss_init[0U])); - Lib_Memzero0_memzero(kp_s, (uint32_t)16U * sizeof (kp_s[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(mu_decode, (uint32_t)16U * sizeof (mu_decode[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_Frodo640.h b/dist/c89-compatible/Hacl_Frodo640.h deleted file mode 100644 index 7a376206e5..0000000000 --- a/dist/c89-compatible/Hacl_Frodo640.h +++ /dev/null @@ -1,62 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Frodo640_H -#define __Hacl_Frodo640_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Spec.h" -#include "Hacl_SHA3.h" -#include "Hacl_Frodo_KEM.h" -#include "evercrypt_targetconfig.h" -extern uint32_t Hacl_Frodo640_crypto_bytes; - -extern uint32_t Hacl_Frodo640_crypto_publickeybytes; - -extern uint32_t Hacl_Frodo640_crypto_secretkeybytes; - -extern uint32_t Hacl_Frodo640_crypto_ciphertextbytes; - -uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk); - -uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk); - -uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Frodo640_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Frodo976.c b/dist/c89-compatible/Hacl_Frodo976.c deleted file mode 100644 index 3e8104c187..0000000000 --- a/dist/c89-compatible/Hacl_Frodo976.c +++ /dev/null @@ -1,382 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "Hacl_Frodo976.h" - -#include "internal/Hacl_Frodo_KEM.h" - -uint32_t Hacl_Frodo976_crypto_bytes = (uint32_t)24U; - -uint32_t Hacl_Frodo976_crypto_publickeybytes = (uint32_t)15632U; - -uint32_t Hacl_Frodo976_crypto_secretkeybytes = (uint32_t)31296U; - -uint32_t Hacl_Frodo976_crypto_ciphertextbytes = (uint32_t)15744U; - -uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk) -{ - uint8_t coins[64U] = { 0U }; - uint8_t *s; - uint8_t *seed_se; - uint8_t *z; - uint8_t *seed_a; - uint8_t *b_bytes; - uint8_t *s_bytes; - randombytes_((uint32_t)64U, coins); - s = coins; - seed_se = coins + (uint32_t)24U; - z = coins + (uint32_t)48U; - seed_a = pk; - Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a); - b_bytes = pk + (uint32_t)16U; - s_bytes = sk + (uint32_t)15656U; - { - uint16_t s_matrix[7808U] = { 0U }; - uint16_t e_matrix[7808U] = { 0U }; - uint8_t r[31232U] = { 0U }; - uint8_t shake_input_seed_se[25U] = { 0U }; - shake_input_seed_se[0U] = (uint8_t)0x5fU; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31232U, r); - Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U, (uint32_t)8U, r, s_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U, - (uint32_t)8U, - r + (uint32_t)15616U, - e_matrix); - { - uint16_t b_matrix[7808U] = { 0U }; - uint16_t a_matrix[952576U] = { 0U }; - uint32_t slen1; - uint8_t *sk_p; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)976U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul_s((uint32_t)976U, - (uint32_t)976U, - (uint32_t)8U, - a_matrix, - s_matrix, - b_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)976U, (uint32_t)8U, b_matrix, e_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)976U, - (uint32_t)8U, - (uint32_t)16U, - b_matrix, - b_bytes); - Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)976U, (uint32_t)8U, s_matrix, s_bytes); - Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(e_matrix, (uint32_t)7808U * sizeof (e_matrix[0U])); - slen1 = (uint32_t)31272U; - sk_p = sk; - memcpy(sk_p, s, (uint32_t)24U * sizeof (uint8_t)); - memcpy(sk_p + (uint32_t)24U, pk, (uint32_t)15632U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, sk + slen1); - Lib_Memzero0_memzero(coins, (uint32_t)64U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } -} - -uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk) -{ - uint8_t coins[24U] = { 0U }; - randombytes_((uint32_t)24U, coins); - { - uint8_t seed_se_k[48U] = { 0U }; - uint8_t pkh_mu[48U] = { 0U }; - uint8_t *seed_se; - uint8_t *k; - Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, pkh_mu); - memcpy(pkh_mu + (uint32_t)24U, coins, (uint32_t)24U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)48U, pkh_mu, (uint32_t)48U, seed_se_k); - seed_se = seed_se_k; - k = seed_se_k + (uint32_t)24U; - { - uint8_t *seed_a = pk; - uint8_t *b = pk + (uint32_t)16U; - uint16_t sp_matrix[7808U] = { 0U }; - uint16_t ep_matrix[7808U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[31360U] = { 0U }; - uint8_t shake_input_seed_se[25U] = { 0U }; - uint8_t *c1; - uint8_t *c2; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t)); - 
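crypto_kem_enc above derives its secrets Fujisaki-Okamoto style: pkh = SHAKE256(pk), then seed_se || k = SHAKE256(pkh || mu), split into 24-byte halves for Frodo-976; crypto_kem_dec later recomputes the same chain from the copy of pkh stored at the tail of sk. A self-contained sketch using the library's SHAKE256; derive_seeds is a hypothetical helper name:

#include <stdint.h>
#include <string.h>
#include "Hacl_SHA3.h"

/* Frodo-976 key schedule as in crypto_kem_enc above:
   pkh = SHAKE256(pk), seed_se || k = SHAKE256(pkh || mu). */
static void derive_seeds(uint8_t *pk, uint8_t *mu, uint8_t *seed_se, uint8_t *k)
{
  uint8_t pkh_mu[48U] = { 0U };
  uint8_t seed_se_k[48U] = { 0U };
  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, pkh_mu);
  memcpy(pkh_mu + (uint32_t)24U, mu, (uint32_t)24U * sizeof (uint8_t));
  Hacl_SHA3_shake256_hacl((uint32_t)48U, pkh_mu, (uint32_t)48U, seed_se_k);
  memcpy(seed_se, seed_se_k, (uint32_t)24U * sizeof (uint8_t));
  memcpy(k, seed_se_k + (uint32_t)24U, (uint32_t)24U * sizeof (uint8_t));
}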
Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r); - Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, - (uint32_t)976U, - r + (uint32_t)15616U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)31232U, - epp_matrix); - c1 = ct; - c2 = ct + (uint32_t)15616U; - { - uint16_t bp_matrix[7808U] = { 0U }; - uint16_t a_matrix[952576U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)976U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)976U, - (uint32_t)976U, - sp_matrix, - a_matrix, - bp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bp_matrix, ep_matrix); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bp_matrix, c1); - { - uint16_t v_matrix[64U] = { 0U }; - uint16_t b_matrix[7808U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, - (uint32_t)8U, - (uint32_t)16U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)976U, - (uint32_t)8U, - sp_matrix, - b_matrix, - v_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U, - (uint32_t)3U, - (uint32_t)8U, - coins, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, - (uint32_t)8U, - (uint32_t)16U, - v_matrix, - c2); - Lib_Memzero0_memzero(v_matrix, (uint32_t)64U * sizeof (v_matrix[0U])); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U * sizeof (ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof (epp_matrix[0U])); - { - uint32_t ss_init_len = (uint32_t)15768U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t shake_input_ss[ss_init_len]; - memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(shake_input_ss, ct, (uint32_t)15744U * sizeof (uint8_t)); - memcpy(shake_input_ss + (uint32_t)15744U, k, (uint32_t)24U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)24U, ss); - Lib_Memzero0_memzero(shake_input_ss, ss_init_len * sizeof (shake_input_ss[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(coins, (uint32_t)24U * sizeof (coins[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } -} - -uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk) -{ - uint16_t bp_matrix[7808U] = { 0U }; - uint16_t c_matrix[64U] = { 0U }; - uint8_t *c1 = ct; - uint8_t *c2 = ct + (uint32_t)15616U; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, c1, bp_matrix); - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix); - { - uint8_t mu_decode[24U] = { 0U }; - uint8_t *s_bytes = sk + (uint32_t)15656U; - uint16_t s_matrix[7808U] = { 0U }; - uint16_t m_matrix[64U] = { 0U }; - Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)976U, (uint32_t)8U, s_bytes, s_matrix); - Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U, - (uint32_t)976U, - 
(uint32_t)8U, - bp_matrix, - s_matrix, - m_matrix); - Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix); - Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U, - (uint32_t)3U, - (uint32_t)8U, - m_matrix, - mu_decode); - Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U * sizeof (s_matrix[0U])); - Lib_Memzero0_memzero(m_matrix, (uint32_t)64U * sizeof (m_matrix[0U])); - { - uint8_t seed_se_k[48U] = { 0U }; - uint32_t pkh_mu_decode_len = (uint32_t)48U; - KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len); - { - uint8_t pkh_mu_decode[pkh_mu_decode_len]; - memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t)); - { - uint8_t *pkh = sk + (uint32_t)31272U; - uint8_t *seed_se; - uint8_t *kp; - uint8_t *s; - memcpy(pkh_mu_decode, pkh, (uint32_t)24U * sizeof (uint8_t)); - memcpy(pkh_mu_decode + (uint32_t)24U, mu_decode, (uint32_t)24U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)48U, seed_se_k); - seed_se = seed_se_k; - kp = seed_se_k + (uint32_t)24U; - s = sk; - { - uint16_t bpp_matrix[7808U] = { 0U }; - uint16_t cp_matrix[64U] = { 0U }; - uint16_t sp_matrix[7808U] = { 0U }; - uint16_t ep_matrix[7808U] = { 0U }; - uint16_t epp_matrix[64U] = { 0U }; - uint8_t r[31360U] = { 0U }; - uint8_t shake_input_seed_se[25U] = { 0U }; - uint8_t *pk; - uint8_t *seed_a; - uint8_t *b; - shake_input_seed_se[0U] = (uint8_t)0x96U; - memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r); - Lib_Memzero0_memzero(shake_input_seed_se, - (uint32_t)25U * sizeof (shake_input_seed_se[0U])); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, - (uint32_t)976U, - r, - sp_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, - (uint32_t)976U, - r + (uint32_t)15616U, - ep_matrix); - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, - (uint32_t)8U, - r + (uint32_t)31232U, - epp_matrix); - pk = sk + (uint32_t)24U; - seed_a = pk; - b = pk + (uint32_t)16U; - { - uint16_t a_matrix[952576U] = { 0U }; - Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, - (uint32_t)976U, - seed_a, - a_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)976U, - (uint32_t)976U, - sp_matrix, - a_matrix, - bpp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bpp_matrix, ep_matrix); - { - uint16_t b_matrix[7808U] = { 0U }; - Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, - (uint32_t)8U, - (uint32_t)16U, - b, - b_matrix); - Hacl_Impl_Matrix_matrix_mul((uint32_t)8U, - (uint32_t)976U, - (uint32_t)8U, - sp_matrix, - b_matrix, - cp_matrix); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix); - { - uint16_t mu_encode[64U] = { 0U }; - uint16_t b1; - uint16_t b2; - uint16_t mask0; - uint16_t mask; - Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U, - (uint32_t)3U, - (uint32_t)8U, - mu_decode, - mu_encode); - Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode); - Lib_Memzero0_memzero(mu_encode, (uint32_t)64U * sizeof (mu_encode[0U])); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, - (uint32_t)976U, - (uint32_t)16U, - bpp_matrix); - Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix); - Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U * sizeof (sp_matrix[0U])); - Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U * sizeof (ep_matrix[0U])); - Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U * sizeof 
(epp_matrix[0U])); - b1 = - Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, - (uint32_t)976U, - bp_matrix, - bpp_matrix); - b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix); - mask0 = b1 & b2; - mask = mask0; - { - uint8_t kp_s[24U] = { 0U }; - uint32_t ss_init_len; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)24U; i++) - { - uint8_t *os = kp_s; - uint8_t uu____0 = s[i]; - uint8_t x = uu____0 ^ ((uint8_t)mask & (kp[i] ^ uu____0)); - os[i] = x; - } - } - ss_init_len = (uint32_t)15768U; - KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len); - { - uint8_t ss_init[ss_init_len]; - memset(ss_init, 0U, ss_init_len * sizeof (uint8_t)); - memcpy(ss_init, ct, (uint32_t)15744U * sizeof (uint8_t)); - memcpy(ss_init + (uint32_t)15744U, kp_s, (uint32_t)24U * sizeof (uint8_t)); - Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)24U, ss); - Lib_Memzero0_memzero(ss_init, ss_init_len * sizeof (ss_init[0U])); - Lib_Memzero0_memzero(kp_s, (uint32_t)24U * sizeof (kp_s[0U])); - Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U * sizeof (seed_se_k[0U])); - Lib_Memzero0_memzero(mu_decode, (uint32_t)24U * sizeof (mu_decode[0U])); - return (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_Frodo976.h b/dist/c89-compatible/Hacl_Frodo976.h deleted file mode 100644 index 5026899f8d..0000000000 --- a/dist/c89-compatible/Hacl_Frodo976.h +++ /dev/null @@ -1,62 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Frodo976_H -#define __Hacl_Frodo976_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Spec.h" -#include "Hacl_SHA3.h" -#include "Hacl_Frodo_KEM.h" -#include "evercrypt_targetconfig.h" -extern uint32_t Hacl_Frodo976_crypto_bytes; - -extern uint32_t Hacl_Frodo976_crypto_publickeybytes; - -extern uint32_t Hacl_Frodo976_crypto_secretkeybytes; - -extern uint32_t Hacl_Frodo976_crypto_ciphertextbytes; - -uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk); - -uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk); - -uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Frodo976_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Frodo_KEM.c b/dist/c89-compatible/Hacl_Frodo_KEM.c deleted file mode 100644 index 27307761ad..0000000000 --- a/dist/c89-compatible/Hacl_Frodo_KEM.c +++ /dev/null @@ -1,33 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "internal/Hacl_Frodo_KEM.h" - - - -void randombytes_(uint32_t len, uint8_t *res) -{ - bool b = Lib_RandomBuffer_System_randombytes(res, len); -} - diff --git a/dist/c89-compatible/Hacl_Frodo_KEM.h b/dist/c89-compatible/Hacl_Frodo_KEM.h deleted file mode 100644 index c9a6e25b70..0000000000 --- a/dist/c89-compatible/Hacl_Frodo_KEM.h +++ /dev/null @@ -1,660 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Frodo_KEM_H -#define __Hacl_Frodo_KEM_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_RandomBuffer_System.h" -#include "Hacl_Spec.h" -#include "Hacl_SHA3.h" -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -static inline void -Hacl_Keccak_shake128_4x( - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint32_t output_len, - uint8_t *output0, - uint8_t *output1, - uint8_t *output2, - uint8_t *output3 -) -{ - Hacl_SHA3_shake128_hacl(input_len, input0, output_len, output0); - Hacl_SHA3_shake128_hacl(input_len, input1, output_len, output1); - Hacl_SHA3_shake128_hacl(input_len, input2, output_len, output2); - Hacl_SHA3_shake128_hacl(input_len, input3, output_len, output3); -} - -static inline void -Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a) -{ - if (logq < (uint32_t)16U) - { - uint32_t i; - for (i = (uint32_t)0U; i < n1; i++) - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n2; i0++) - { - a[i * n2 + i0] = a[i * n2 + i0] & (((uint16_t)1U << logq) - (uint16_t)1U); - } - } - return; - } -} - -static inline void -Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n1; i++) - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n2; i0++) - { - a[i * n2 + i0] = a[i * n2 + i0] + b[i * n2 + i0]; - } - } -} - -static inline void -Hacl_Impl_Matrix_matrix_sub(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n1; i++) - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n2; i0++) - { - b[i * n2 + i0] = a[i * n2 + i0] - b[i * n2 + i0]; - } - } -} - -static inline void -Hacl_Impl_Matrix_matrix_mul( - uint32_t n1, - uint32_t n2, - uint32_t n3, - uint16_t *a, - uint16_t *b, - uint16_t *c -) -{ - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n1; i0++) - { - uint32_t i1; - for (i1 = (uint32_t)0U; i1 < n3; i1++) - { - uint16_t res = (uint16_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < n2; i++) - { - uint16_t aij = a[i0 * n2 + i]; - uint16_t bjk = b[i * n3 + i1]; - uint16_t res0 = res; - res = res0 + aij * bjk; - } - } - c[i0 * n3 + i1] = res; - } - } -} - -static inline void -Hacl_Impl_Matrix_matrix_mul_s( - uint32_t n1, - uint32_t n2, - uint32_t n3, - uint16_t *a, - uint16_t *b, - uint16_t *c -) -{ - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n1; i0++) - { - uint32_t i1; - for (i1 = (uint32_t)0U; i1 < n3; i1++) - { - uint16_t res = (uint16_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < n2; i++) - { - uint16_t aij = a[i0 * n2 + i]; - uint16_t bjk = b[i1 * n2 + i]; - uint16_t res0 = res; - res = res0 + aij * bjk; - } - } - c[i0 * n3 + i1] = res; - } - } -} - -static inline uint16_t -Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b) -{ - uint16_t res = (uint16_t)0xFFFFU; - uint16_t r; - { - uint32_t i; - for (i =
(uint32_t)0U; i < n1 * n2; i++) - { - uint16_t uu____0 = FStar_UInt16_eq_mask(a[i], b[i]); - res = uu____0 & res; - } - } - r = res; - return r; -} - -static inline void -Hacl_Impl_Matrix_matrix_to_lbytes(uint32_t n1, uint32_t n2, uint16_t *m, uint8_t *res) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n1 * n2; i++) - { - store16_le(res + (uint32_t)2U * i, m[i]); - } -} - -static inline void -Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16_t *res) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n1 * n2; i++) - { - uint16_t *os = res; - uint16_t u = load16_le(b + (uint32_t)2U * i); - uint16_t x = u; - os[i] = x; - } -} - -static inline void -Hacl_Impl_Frodo_Gen_frodo_gen_matrix_shake_4x(uint32_t n, uint8_t *seed, uint16_t *res) -{ - KRML_CHECK_SIZE(sizeof (uint8_t), (uint32_t)8U * n); - { - uint8_t r[(uint32_t)8U * n]; - memset(r, 0U, (uint32_t)8U * n * sizeof (uint8_t)); - { - uint8_t tmp_seed[72U] = { 0U }; - memcpy(tmp_seed + (uint32_t)2U, seed, (uint32_t)16U * sizeof (uint8_t)); - memcpy(tmp_seed + (uint32_t)20U, seed, (uint32_t)16U * sizeof (uint8_t)); - memcpy(tmp_seed + (uint32_t)38U, seed, (uint32_t)16U * sizeof (uint8_t)); - memcpy(tmp_seed + (uint32_t)56U, seed, (uint32_t)16U * sizeof (uint8_t)); - memset(res, 0U, n * n * sizeof (uint16_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n / (uint32_t)4U; i++) - { - uint8_t *r0 = r + (uint32_t)0U * n; - uint8_t *r1 = r + (uint32_t)2U * n; - uint8_t *r2 = r + (uint32_t)4U * n; - uint8_t *r3 = r + (uint32_t)6U * n; - uint8_t *tmp_seed0 = tmp_seed; - uint8_t *tmp_seed1 = tmp_seed + (uint32_t)18U; - uint8_t *tmp_seed2 = tmp_seed + (uint32_t)36U; - uint8_t *tmp_seed3 = tmp_seed + (uint32_t)54U; - store16_le(tmp_seed0, (uint16_t)((uint32_t)4U * i + (uint32_t)0U)); - store16_le(tmp_seed1, (uint16_t)((uint32_t)4U * i + (uint32_t)1U)); - store16_le(tmp_seed2, (uint16_t)((uint32_t)4U * i + (uint32_t)2U)); - store16_le(tmp_seed3, (uint16_t)((uint32_t)4U * i + (uint32_t)3U)); - Hacl_Keccak_shake128_4x((uint32_t)18U, - tmp_seed0, - tmp_seed1, - tmp_seed2, - tmp_seed3, - (uint32_t)2U * n, - r0, - r1, - r2, - r3); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n; i0++) - { - uint8_t *resij0 = r0 + i0 * (uint32_t)2U; - uint8_t *resij1 = r1 + i0 * (uint32_t)2U; - uint8_t *resij2 = r2 + i0 * (uint32_t)2U; - uint8_t *resij3 = r3 + i0 * (uint32_t)2U; - uint16_t u = load16_le(resij0); - res[((uint32_t)4U * i + (uint32_t)0U) * n + i0] = u; - { - uint16_t u0 = load16_le(resij1); - res[((uint32_t)4U * i + (uint32_t)1U) * n + i0] = u0; - { - uint16_t u1 = load16_le(resij2); - res[((uint32_t)4U * i + (uint32_t)2U) * n + i0] = u1; - { - uint16_t u2 = load16_le(resij3); - res[((uint32_t)4U * i + (uint32_t)3U) * n + i0] = u2; - } - } - } - } - } - } - } - } - } -} - -static inline void -Hacl_Impl_Frodo_Params_frodo_gen_matrix( - Spec_Frodo_Params_frodo_gen_a a, - uint32_t n, - uint8_t *seed, - uint16_t *a_matrix -) -{ - switch (a) - { - case Spec_Frodo_Params_SHAKE128: - { - Hacl_Impl_Frodo_Gen_frodo_gen_matrix_shake_4x(n, seed, a_matrix); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -static const -uint16_t -Hacl_Impl_Frodo_Params_cdf_table640[13U] = - { - (uint16_t)4643U, (uint16_t)13363U, (uint16_t)20579U, (uint16_t)25843U, (uint16_t)29227U, - (uint16_t)31145U, (uint16_t)32103U, (uint16_t)32525U, (uint16_t)32689U, (uint16_t)32745U, - (uint16_t)32762U, (uint16_t)32766U, (uint16_t)32767U - }; - -static const -uint16_t 
-Hacl_Impl_Frodo_Params_cdf_table976[11U] = - { - (uint16_t)5638U, (uint16_t)15915U, (uint16_t)23689U, (uint16_t)28571U, (uint16_t)31116U, - (uint16_t)32217U, (uint16_t)32613U, (uint16_t)32731U, (uint16_t)32760U, (uint16_t)32766U, - (uint16_t)32767U - }; - -static const -uint16_t -Hacl_Impl_Frodo_Params_cdf_table1344[7U] = - { - (uint16_t)9142U, (uint16_t)23462U, (uint16_t)30338U, (uint16_t)32361U, (uint16_t)32725U, - (uint16_t)32765U, (uint16_t)32767U - }; - -static inline void -Hacl_Impl_Frodo_Sample_frodo_sample_matrix64( - uint32_t n1, - uint32_t n2, - uint8_t *r, - uint16_t *res -) -{ - uint32_t i; - memset(res, 0U, n1 * n2 * sizeof (uint16_t)); - for (i = (uint32_t)0U; i < n1; i++) - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n2; i0++) - { - uint8_t *resij = r + (uint32_t)2U * (n2 * i + i0); - uint16_t u = load16_le(resij); - uint16_t uu____0 = u; - uint16_t prnd = uu____0 >> (uint32_t)1U; - uint16_t sign = uu____0 & (uint16_t)1U; - uint16_t sample = (uint16_t)0U; - uint32_t bound = (uint32_t)12U; - uint16_t sample00; - { - uint32_t i1; - for (i1 = (uint32_t)0U; i1 < bound; i1++) - { - uint16_t sample0 = sample; - uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i1]; - uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U; - sample = samplei + sample0; - } - } - sample00 = sample; - res[i * n2 + i0] = ((~sign + (uint16_t)1U) ^ sample00) + sign; - } - } -} - -static inline void -Hacl_Impl_Frodo_Sample_frodo_sample_matrix640( - uint32_t n1, - uint32_t n2, - uint8_t *r, - uint16_t *res -) -{ - uint32_t i; - memset(res, 0U, n1 * n2 * sizeof (uint16_t)); - for (i = (uint32_t)0U; i < n1; i++) - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n2; i0++) - { - uint8_t *resij = r + (uint32_t)2U * (n2 * i + i0); - uint16_t u = load16_le(resij); - uint16_t uu____0 = u; - uint16_t prnd = uu____0 >> (uint32_t)1U; - uint16_t sign = uu____0 & (uint16_t)1U; - uint16_t sample = (uint16_t)0U; - uint32_t bound = (uint32_t)12U; - uint16_t sample00; - { - uint32_t i1; - for (i1 = (uint32_t)0U; i1 < bound; i1++) - { - uint16_t sample0 = sample; - uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i1]; - uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U; - sample = samplei + sample0; - } - } - sample00 = sample; - res[i * n2 + i0] = ((~sign + (uint16_t)1U) ^ sample00) + sign; - } - } -} - -static inline void -Hacl_Impl_Frodo_Sample_frodo_sample_matrix976( - uint32_t n1, - uint32_t n2, - uint8_t *r, - uint16_t *res -) -{ - uint32_t i; - memset(res, 0U, n1 * n2 * sizeof (uint16_t)); - for (i = (uint32_t)0U; i < n1; i++) - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n2; i0++) - { - uint8_t *resij = r + (uint32_t)2U * (n2 * i + i0); - uint16_t u = load16_le(resij); - uint16_t uu____0 = u; - uint16_t prnd = uu____0 >> (uint32_t)1U; - uint16_t sign = uu____0 & (uint16_t)1U; - uint16_t sample = (uint16_t)0U; - uint32_t bound = (uint32_t)10U; - uint16_t sample00; - { - uint32_t i1; - for (i1 = (uint32_t)0U; i1 < bound; i1++) - { - uint16_t sample0 = sample; - uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table976[i1]; - uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U; - sample = samplei + sample0; - } - } - sample00 = sample; - res[i * n2 + i0] = ((~sign + (uint16_t)1U) ^ sample00) + sign; - } - } -} - -static inline void -Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344( - uint32_t n1, - uint32_t n2, - uint8_t *r, - uint16_t *res -) -{ - uint32_t i; - memset(res, 0U, n1 * n2 * sizeof (uint16_t)); - for (i = (uint32_t)0U; i < n1; i++) - { - uint32_t i0; - for 
(i0 = (uint32_t)0U; i0 < n2; i0++) - { - uint8_t *resij = r + (uint32_t)2U * (n2 * i + i0); - uint16_t u = load16_le(resij); - uint16_t uu____0 = u; - uint16_t prnd = uu____0 >> (uint32_t)1U; - uint16_t sign = uu____0 & (uint16_t)1U; - uint16_t sample = (uint16_t)0U; - uint32_t bound = (uint32_t)6U; - uint16_t sample00; - { - uint32_t i1; - for (i1 = (uint32_t)0U; i1 < bound; i1++) - { - uint16_t sample0 = sample; - uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table1344[i1]; - uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U; - sample = samplei + sample0; - } - } - sample00 = sample; - res[i * n2 + i0] = ((~sign + (uint16_t)1U) ^ sample00) + sign; - } - } -} - -static inline void -Hacl_Impl_Frodo_Pack_frodo_pack( - uint32_t n1, - uint32_t n2, - uint32_t d, - uint16_t *a, - uint8_t *res -) -{ - uint32_t n = n1 * n2 / (uint32_t)8U; - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - uint16_t *a1 = a + (uint32_t)8U * i; - uint8_t *r = res + d * i; - uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U; - uint8_t v16[16U] = { 0U }; - uint16_t a0 = a1[0U] & maskd; - uint16_t a11 = a1[1U] & maskd; - uint16_t a2 = a1[2U] & maskd; - uint16_t a3 = a1[3U] & maskd; - uint16_t a4 = a1[4U] & maskd; - uint16_t a5 = a1[5U] & maskd; - uint16_t a6 = a1[6U] & maskd; - uint16_t a7 = a1[7U] & maskd; - FStar_UInt128_uint128 - templong = - FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a0), - (uint32_t)7U * d), - FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11), - (uint32_t)6U * d)), - FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2), - (uint32_t)5U * d)), - FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3), - (uint32_t)4U * d)), - FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4), - (uint32_t)3U * d)), - FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5), - (uint32_t)2U * d)), - FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), (uint32_t)1U * d)), - FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), (uint32_t)0U * d)); - uint8_t *src; - store128_be(v16, templong); - src = v16 + (uint32_t)16U - d; - memcpy(r, src, d * sizeof (uint8_t)); - } -} - -static inline void -Hacl_Impl_Frodo_Pack_frodo_unpack( - uint32_t n1, - uint32_t n2, - uint32_t d, - uint8_t *b, - uint16_t *res -) -{ - uint32_t n = n1 * n2 / (uint32_t)8U; - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - uint8_t *b1 = b + d * i; - uint16_t *r = res + (uint32_t)8U * i; - uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U; - uint8_t src[16U] = { 0U }; - FStar_UInt128_uint128 u; - FStar_UInt128_uint128 templong; - memcpy(src + (uint32_t)16U - d, b1, d * sizeof (uint8_t)); - u = load128_be(src); - templong = u; - r[0U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)7U * d)) - & maskd; - r[1U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)6U * d)) - & maskd; - r[2U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)5U * d)) - & maskd; - r[3U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)4U * d)) - & maskd; - r[4U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)3U * d)) - & 
maskd; - r[5U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)2U * d)) - & maskd; - r[6U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)1U * d)) - & maskd; - r[7U] = - (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong, - (uint32_t)0U * d)) - & maskd; - } -} - -static inline void -Hacl_Impl_Frodo_Encode_frodo_key_encode( - uint32_t logq, - uint32_t b, - uint32_t n, - uint8_t *a, - uint16_t *res -) -{ - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < n; i0++) - { - uint8_t v8[8U] = { 0U }; - uint8_t *chunk = a + i0 * b; - uint64_t u; - uint64_t x0; - uint64_t x; - uint32_t i; - memcpy(v8, chunk, b * sizeof (uint8_t)); - u = load64_le(v8); - x0 = u; - x = x0; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - uint64_t rk = x >> b * i & (((uint64_t)1U << b) - (uint64_t)1U); - res[i0 * n + i] = (uint16_t)rk << (logq - b); - } - } -} - -static inline void -Hacl_Impl_Frodo_Encode_frodo_key_decode( - uint32_t logq, - uint32_t b, - uint32_t n, - uint16_t *a, - uint8_t *res -) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - uint64_t templong0 = (uint64_t)0U; - uint64_t templong; - KRML_MAYBE_FOR8(i0, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint16_t aik = a[i * n + i0]; - uint16_t res1 = (aik + ((uint16_t)1U << (logq - b - (uint32_t)1U))) >> (logq - b); - templong0 = templong0 | (uint64_t)(res1 & (((uint16_t)1U << b) - (uint16_t)1U)) << b * i0;); - templong = templong0; - { - uint8_t v8[8U] = { 0U }; - uint8_t *tmp; - store64_le(v8, templong); - tmp = v8; - memcpy(res + i * b, tmp, b * sizeof (uint8_t)); - } - } -} - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Frodo_KEM_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_GenericField32.c b/dist/c89-compatible/Hacl_GenericField32.c deleted file mode 100644 index 901d719699..0000000000 --- a/dist/c89-compatible/Hacl_GenericField32.c +++ /dev/null @@ -1,800 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_GenericField32.h" - -#include "internal/Hacl_Bignum.h" - -/******************************************************************************* - -A verified field arithmetic library. - -This is a 32-bit optimized version, where bignums are represented as an array -of `len` unsigned 32-bit integers, i.e. uint32_t[len]. 
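For orientation, a minimal sketch (not part of the deleted file) of the limb layout this banner describes, assuming the library's least-significant-limb-first order:

    /* Hypothetical: the 96-bit modulus 2^95 + 17 as len = 3 limbs,
       least-significant limb first. Note n is odd and 1 < n, so it
       satisfies the modulus preconditions stated below. */
    uint32_t n[3];
    n[0] = 17U;                   /* bits 0..31  */
    n[1] = 0U;                    /* bits 32..63 */
    n[2] = (uint32_t)0x80000000U; /* bits 64..95 */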
- -All the arithmetic operations are performed in the Montgomery domain. - -All the functions below preserve the following invariant for a bignum `aM` in -Montgomery form. - • aM < n - -*******************************************************************************/ - - -/** -Check whether this library will work for a modulus `n`. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n -*/ -bool Hacl_GenericField32_field_modulus_check(uint32_t len, uint32_t *n) -{ - uint32_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u32(len, n); - return m == (uint32_t)0xFFFFFFFFU; -} - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_GenericField32_field_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 -*Hacl_GenericField32_field_init(uint32_t len, uint32_t *n) -{ - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(len, sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len); - { - uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(len, sizeof (uint32_t)); - uint32_t *r21 = r2; - uint32_t *n11 = n1; - uint32_t nBits; - uint32_t mu; - memcpy(n11, n, len * sizeof (uint32_t)); - nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res; - res.len = len; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 - )); - buf[0U] = res; - return buf; - } - } - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_GenericField32_field_init. - - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void Hacl_GenericField32_field_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t *n = k1.n; - uint32_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Return the size of a modulus `n` in limbs. - - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -uint32_t Hacl_GenericField32_field_get_len(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - return k1.len; -} - -/** -Convert a bignum from the regular representation to the Montgomery representation. - - Write `a * R mod n` in `aM`. - - The argument a and the outparam aM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. 
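A hedged usage sketch of the conversion pair just documented (illustrative only; the 4-limb odd modulus n and its validity are assumptions, prepared elsewhere):

    /* Round trip through the Montgomery representation. */
    uint32_t a[4] = { 5U, 0U, 0U, 0U };
    uint32_t aM[4], a_back[4];
    Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
    *ctx = Hacl_GenericField32_field_init((uint32_t)4U, n);
    Hacl_GenericField32_to_field(ctx, a, aM);        /* aM = a * R mod n */
    Hacl_GenericField32_from_field(ctx, aM, a_back); /* a_back = a mod n */
    Hacl_GenericField32_field_free(ctx);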
-*/ -void -Hacl_GenericField32_to_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *aM -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_to_mont_u32(len1, k1.n, k1.mu, k1.r2, a, aM); -} - -/** -Convert a result back from the Montgomery representation to the regular representation. - - Write `aM / R mod n` in `a`, i.e. - Hacl_GenericField32_from_field(k, Hacl_GenericField32_to_field(k, a)) == a % n - - The argument aM and the outparam a are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_from_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *a -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, k1.n, k1.mu, aM, a); -} - -/** -Write `aM + bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_add( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *bM, - uint32_t *cM -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_bn_add_mod_n_u32(len1, k1.n, aM, bM, cM); -} - -/** -Write `aM - bM mod n` to `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_sub( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *bM, - uint32_t *cM -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_bn_sub_mod_n_u32(len1, k1.n, aM, bM, cM); -} - -/** -Write `aM * bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_mul( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *bM, - uint32_t *cM -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, k1.n, k1.mu, aM, bM, cM); -} - -/** -Write `aM * aM mod n` in `cM`. - - The argument aM and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_sqr( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *cM -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, k1.n, k1.mu, aM, cM); -} - -/** -Convert a bignum `one` to its Montgomery representation. - - The outparam oneM is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. 
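Since every operation here stays inside the Montgomery domain, a full modular multiplication is a convert/multiply/convert-back pipeline. A sketch, with ctx a context obtained as in the previous sketch (4 limbs, hypothetical):

    uint32_t aM[4], bM[4], cM[4], c[4];
    Hacl_GenericField32_to_field(ctx, a, aM);
    Hacl_GenericField32_to_field(ctx, b, bM);
    Hacl_GenericField32_mul(ctx, aM, bM, cM);   /* still in Montgomery form */
    Hacl_GenericField32_from_field(ctx, cM, c); /* c = a * b mod n */

The `one` function documented above yields the multiplicative identity in Montgomery form, so multiplying any aM by oneM leaves it unchanged modulo n.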
-*/ -void Hacl_GenericField32_one(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, uint32_t *oneM) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, k1.n, k1.mu, k1.r2, oneM); -} - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than exp_vartime. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • b < pow2 bBits -*/ -void -Hacl_GenericField32_exp_consttime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t bBits, - uint32_t *b, - uint32_t *resM -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint32_t), k1.len); - { - uint32_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint32_t)); - memcpy(aMc, aM, k1.len * sizeof (uint32_t)); - if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); - { - uint32_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); - { - uint32_t sw = (uint32_t)0U; - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - uint32_t sw1 = bit ^ sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aMc[i]); - resM[i] = resM[i] ^ dummy; - aMc[i] = aMc[i] ^ dummy; - } - } - { - uint32_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, aMc, resM, aMc); - { - uint32_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, resM, resM); - sw = bit; - } - } - } - } - { - uint32_t sw0 = sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aMc[i]); - resM[i] = resM[i] ^ dummy; - aMc[i] = aMc[i] ^ dummy; - } - } - } - } - } - } - else - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); - { - uint32_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1); - { - uint32_t table[(uint32_t)16U * len1]; - memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t tmp[len1]; - memset(tmp, 0U, 
len1 * sizeof (uint32_t)); - { - uint32_t *t0 = table; - uint32_t *t1 = table + len1; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0); - memcpy(t1, aMc, len1 * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * len1; - uint32_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, - tmp, - len1 * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1; - uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, - tmp, - len1 * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i0] >> j; - uint32_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - memcpy(resM, table + (uint32_t)0U * len1, len1 * sizeof (uint32_t)); - KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U); - uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint32_t *os = resM; - uint32_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x; - } - }); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t a_bits_l[len1]; - memset(a_bits_l, 0U, len1 * sizeof (uint32_t)); - memcpy(a_bits_l, table + (uint32_t)0U * len1, len1 * sizeof (uint32_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint32_t c = FStar_UInt32_eq_mask(bits_l, i2 + (uint32_t)1U); - uint32_t *res_j = table + (i2 + (uint32_t)1U) * len1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint32_t *os = a_bits_l; - uint32_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x; - } - }); - { - uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, - ctx_n, - k1.mu, - resM, - a_bits_l, - resM); - } - } - } - } - } - } - } - } - } - } - } - } -} - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. 
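To make the bBits contract (stated for exp_consttime above and repeated below) concrete, a hypothetical fixed-exponent call, reusing ctx, aM, and resM from the earlier sketches:

    /* resM = aM ^ 65537 mod n. 65537 < 2^17, so bBits = 17 is a valid
       (and tight) bound; the exponent occupies a single 32-bit limb. */
    uint32_t e[1] = { (uint32_t)65537U };
    Hacl_GenericField32_exp_vartime(ctx, aM, (uint32_t)17U, e, resM);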
- - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - exp_consttime function for constant-time variant. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • b < pow2 bBits -*/ -void -Hacl_GenericField32_exp_vartime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t bBits, - uint32_t *b, - uint32_t *resM -) -{ - uint32_t len1 = Hacl_GenericField32_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint32_t), k1.len); - { - uint32_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint32_t)); - memcpy(aMc, aM, k1.len * sizeof (uint32_t)); - if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); - { - uint32_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)32U; - uint32_t j = i % (uint32_t)32U; - uint32_t tmp = b[i1]; - uint32_t bit = tmp >> j & (uint32_t)1U; - if (!(bit == (uint32_t)0U)) - { - uint32_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, resM, aMc, resM); - } - { - uint32_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n0, k1.mu, aMc, aMc); - } - } - } - } - } - } - else - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U; - } - KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1); - { - uint32_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint32_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1); - { - uint32_t table[(uint32_t)16U * len1]; - memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t)); - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t tmp[len1]; - memset(tmp, 0U, len1 * sizeof (uint32_t)); - { - uint32_t *t0 = table; - uint32_t *t1 = table + len1; - uint32_t *ctx_n0 = ctx; - uint32_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0); - memcpy(t1, aMc, len1 * sizeof (uint32_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint32_t *t11 = table + (i + (uint32_t)1U) * len1; - uint32_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, - tmp, - len1 * sizeof (uint32_t)); - uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1; - uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, - tmp, - len1 * sizeof (uint32_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint32_t mask_l = (uint32_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)32U; - uint32_t j = 
bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)32U; - uint32_t p1 = b[i] >> j; - uint32_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_c = ite & mask_l; - uint32_t bits_l32 = bits_c; - uint32_t *a_bits_l = table + bits_l32 * len1; - memcpy(resM, a_bits_l, len1 * sizeof (uint32_t)); - } - } - else - { - uint32_t *ctx_n = ctx; - uint32_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM); - } - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint32_t mask_l = (uint32_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)32U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)32U; - uint32_t p1 = b[i1] >> j; - uint32_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j); - } - else - { - ite = p1; - } - { - uint32_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t a_bits_l[len1]; - memset(a_bits_l, 0U, len1 * sizeof (uint32_t)); - { - uint32_t bits_l32 = bits_l; - uint32_t *a_bits_l1 = table + bits_l32 * len1; - memcpy(a_bits_l, a_bits_l1, len1 * sizeof (uint32_t)); - { - uint32_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, - ctx_n, - k1.mu, - resM, - a_bits_l, - resM); - } - } - } - } - } - } - } - } - } - } - } - } - } -} - -/** -Write `aM ^ (-1) mod n` in `aInvM`. - - The argument aM and the outparam aInvM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. 
- • n is a prime - • 0 < aM -*/ -void -Hacl_GenericField32_inverse( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *aInvM -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k; - uint32_t len1 = k1.len; - KRML_CHECK_SIZE(sizeof (uint32_t), len1); - { - uint32_t n2[len1]; - memset(n2, 0U, len1 * sizeof (uint32_t)); - { - uint32_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2); - uint32_t c1; - if ((uint32_t)1U < len1) - { - uint32_t *a1 = k1.n + (uint32_t)1U; - uint32_t *res1 = n2 + (uint32_t)1U; - uint32_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++) - { - uint32_t t1 = a1[(uint32_t)4U * i]; - uint32_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0); - { - uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1); - { - uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2); - { - uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < len1 - (uint32_t)1U; - i++) - { - uint32_t t1 = a1[i]; - uint32_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i); - } - } - { - uint32_t c10 = c; - c1 = c10; - } - } - else - { - c1 = c0; - } - Hacl_GenericField32_exp_vartime(k, aM, k1.len * (uint32_t)32U, n2, aInvM); - } - } -} - diff --git a/dist/c89-compatible/Hacl_GenericField32.h b/dist/c89-compatible/Hacl_GenericField32.h deleted file mode 100644 index 9043148b80..0000000000 --- a/dist/c89-compatible/Hacl_GenericField32.h +++ /dev/null @@ -1,270 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_GenericField32_H -#define __Hacl_GenericField32_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Bignum_Base.h" -#include "Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -typedef Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_GenericField32_pbn_mont_ctx_u32; - -/******************************************************************************* - -A verified field arithmetic library. - -This is a 32-bit optimized version, where bignums are represented as an array -of `len` unsigned 32-bit integers, i.e. uint32_t[len]. - -All the arithmetic operations are performed in the Montgomery domain. - -All the functions below preserve the following invariant for a bignum `aM` in -Montgomery form. - • aM < n - -*******************************************************************************/ - - -/** -Check whether this library will work for a modulus `n`. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n -*/ -bool Hacl_GenericField32_field_modulus_check(uint32_t len, uint32_t *n); - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint32_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_GenericField32_field_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 -*Hacl_GenericField32_field_init(uint32_t len, uint32_t *n); - -/** -Deallocate the memory previously allocated by Hacl_GenericField32_field_init. - - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void Hacl_GenericField32_field_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k); - -/** -Return the size of a modulus `n` in limbs. - - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -uint32_t Hacl_GenericField32_field_get_len(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k); - -/** -Convert a bignum from the regular representation to the Montgomery representation. - - Write `a * R mod n` in `aM`. - - The argument a and the outparam aM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_to_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *a, - uint32_t *aM -); - -/** -Convert a result back from the Montgomery representation to the regular representation. - - Write `aM / R mod n` in `a`, i.e. - Hacl_GenericField32_from_field(k, Hacl_GenericField32_to_field(k, a)) == a % n - - The argument aM and the outparam a are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_from_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *a -); - -/** -Write `aM + bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. 
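A brief note on why addition needs no special Montgomery treatment: conversion multiplies by R, and (a*R + b*R) mod n = (a + b)*R mod n, so the sum of two converted values represents the sum of the originals. A sketch under the same hypothetical 4-limb context:

    uint32_t cM[4], dM[4];
    Hacl_GenericField32_add(ctx, aM, bM, cM); /* represents (a + b) mod n */
    Hacl_GenericField32_sub(ctx, cM, bM, dM); /* dM == aM, i.e. a mod n   */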
-*/ -void -Hacl_GenericField32_add( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *bM, - uint32_t *cM -); - -/** -Write `aM - bM mod n` to `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_sub( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *bM, - uint32_t *cM -); - -/** -Write `aM * bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_mul( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *bM, - uint32_t *cM -); - -/** -Write `aM * aM mod n` in `cM`. - - The argument aM and the outparam cM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void -Hacl_GenericField32_sqr( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *cM -); - -/** -Convert a bignum `one` to its Montgomery representation. - - The outparam oneM is meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. -*/ -void Hacl_GenericField32_one(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, uint32_t *oneM); - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than exp_vartime. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • b < pow2 bBits -*/ -void -Hacl_GenericField32_exp_consttime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t bBits, - uint32_t *b, - uint32_t *resM -); - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - exp_consttime function for constant-time variant. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • b < pow2 bBits -*/ -void -Hacl_GenericField32_exp_vartime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t bBits, - uint32_t *b, - uint32_t *resM -); - -/** -Write `aM ^ (-1) mod n` in `aInvM`. 
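The primality precondition below follows from the implementation strategy: as the .c file earlier in this diff shows, inverse subtracts 2 from n and calls exp_vartime, i.e. it computes aM^(n-2) mod n by Fermat's little theorem. A usage sketch (hypothetical buffers, ctx as before):

    uint32_t aInvM[4], prodM[4], oneM[4];
    Hacl_GenericField32_inverse(ctx, aM, aInvM);
    Hacl_GenericField32_mul(ctx, aM, aInvM, prodM);
    Hacl_GenericField32_one(ctx, oneM); /* prodM == oneM when 0 < aM < n */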
- - The argument aM and the outparam aInvM are meant to be `len` limbs in size, i.e. uint32_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField32_field_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < aM -*/ -void -Hacl_GenericField32_inverse( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k, - uint32_t *aM, - uint32_t *aInvM -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_GenericField32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_GenericField64.c b/dist/c89-compatible/Hacl_GenericField64.c deleted file mode 100644 index b7f0188351..0000000000 --- a/dist/c89-compatible/Hacl_GenericField64.c +++ /dev/null @@ -1,800 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_GenericField64.h" - -#include "internal/Hacl_Bignum.h" - -/******************************************************************************* - -A verified field arithmetic library. - -This is a 64-bit optimized version, where bignums are represented as an array -of `len` unsigned 64-bit integers, i.e. uint64_t[len]. - -All the arithmetic operations are performed in the Montgomery domain. - -All the functions below preserve the following invariant for a bignum `aM` in -Montgomery form. - • aM < n - -*******************************************************************************/ - - -/** -Check whether this library will work for a modulus `n`. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n -*/ -bool Hacl_GenericField64_field_modulus_check(uint32_t len, uint64_t *n) -{ - uint64_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u64(len, n); - return m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_GenericField64_field_free on the return value - to avoid memory leaks. 
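A hedged lifecycle sketch for the 64-bit context management just documented (len and the len-limb odd modulus n are assumed to be prepared by the caller):

    Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *ctx = NULL;
    if (Hacl_GenericField64_field_modulus_check(len, n))
    {
      ctx = Hacl_GenericField64_field_init(len, n);
      /* ... field arithmetic via ctx ... */
      Hacl_GenericField64_field_free(ctx);
    }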
-*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 -*Hacl_GenericField64_field_init(uint32_t len, uint64_t *n) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(len, sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len); - { - uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(len, sizeof (uint64_t)); - uint64_t *r21 = r2; - uint64_t *n11 = n1; - uint32_t nBits; - uint64_t mu; - memcpy(n11, n, len * sizeof (uint64_t)); - nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21); - mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res; - res.len = len; - res.n = n11; - res.mu = mu; - res.r2 = r21; - KRML_CHECK_SIZE(sizeof (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64), (uint32_t)1U); - { - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - *buf = - (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 - )); - buf[0U] = res; - return buf; - } - } - } - } -} - -/** -Deallocate the memory previously allocated by Hacl_GenericField64_field_init. - - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void Hacl_GenericField64_field_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint64_t *n = k1.n; - uint64_t *r2 = k1.r2; - KRML_HOST_FREE(n); - KRML_HOST_FREE(r2); - KRML_HOST_FREE(k); -} - -/** -Return the size of a modulus `n` in limbs. - - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -uint32_t Hacl_GenericField64_field_get_len(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - return k1.len; -} - -/** -Convert a bignum from the regular representation to the Montgomery representation. - - Write `a * R mod n` in `aM`. - - The argument a and the outparam aM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_to_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *aM -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_to_mont_u64(len1, k1.n, k1.mu, k1.r2, a, aM); -} - -/** -Convert a result back from the Montgomery representation to the regular representation. - - Write `aM / R mod n` in `a`, i.e. - Hacl_GenericField64_from_field(k, Hacl_GenericField64_to_field(k, a)) == a % n - - The argument aM and the outparam a are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_from_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *a -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, k1.n, k1.mu, aM, a); -} - -/** -Write `aM + bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. 
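A composed sketch in the 64-bit domain (hypothetical len-limb buffers aM, bM, cM, dM; ctx from field_init): computing a value representing a^2 + b mod n without leaving Montgomery form:

    Hacl_GenericField64_sqr(ctx, aM, cM);     /* cM represents a^2 mod n */
    Hacl_GenericField64_add(ctx, cM, bM, dM); /* dM represents a^2 + b   */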
-*/ -void -Hacl_GenericField64_add( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *bM, - uint64_t *cM -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_bn_add_mod_n_u64(len1, k1.n, aM, bM, cM); -} - -/** -Write `aM - bM mod n` to `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_sub( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *bM, - uint64_t *cM -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_bn_sub_mod_n_u64(len1, k1.n, aM, bM, cM); -} - -/** -Write `aM * bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_mul( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *bM, - uint64_t *cM -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, k1.n, k1.mu, aM, bM, cM); -} - -/** -Write `aM * aM mod n` in `cM`. - - The argument aM and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_sqr( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *cM -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, k1.n, k1.mu, aM, cM); -} - -/** -Convert a bignum `one` to its Montgomery representation. - - The outparam oneM is meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void Hacl_GenericField64_one(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, uint64_t *oneM) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, k1.n, k1.mu, k1.r2, oneM); -} - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than exp_vartime. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. 
- • b < pow2 bBits -*/ -void -Hacl_GenericField64_exp_consttime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint32_t bBits, - uint64_t *b, - uint64_t *resM -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint64_t), k1.len); - { - uint64_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint64_t)); - memcpy(aMc, aM, k1.len * sizeof (uint64_t)); - if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); - { - uint64_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); - { - uint64_t sw = (uint64_t)0U; - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits; i0++) - { - uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U; - uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U; - uint64_t tmp = b[i1]; - uint64_t bit = tmp >> j & (uint64_t)1U; - uint64_t sw1 = bit ^ sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aMc[i]); - resM[i] = resM[i] ^ dummy; - aMc[i] = aMc[i] ^ dummy; - } - } - { - uint64_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, aMc, resM, aMc); - { - uint64_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, resM, resM); - sw = bit; - } - } - } - } - { - uint64_t sw0 = sw; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aMc[i]); - resM[i] = resM[i] ^ dummy; - aMc[i] = aMc[i] ^ dummy; - } - } - } - } - } - } - else - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); - { - uint64_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1); - { - uint64_t table[(uint32_t)16U * len1]; - memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t tmp[len1]; - memset(tmp, 0U, len1 * sizeof (uint64_t)); - { - uint64_t *t0 = table; - uint64_t *t1 = table + len1; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0); - memcpy(t1, aMc, len1 * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * len1; - uint64_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, - tmp, - len1 * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1; - uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, - tmp, - len1 * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint64_t mask_l = (uint64_t)15U; - uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * 
(uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i0] >> j; - uint64_t ite; - if (i0 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i0 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = ite & mask_l; - memcpy(resM, table + (uint32_t)0U * len1, len1 * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i1, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U)); - uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint64_t *os = resM; - uint64_t x = (c & res_j[i]) | (~c & resM[i]); - os[i] = x; - } - }); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); - } - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t a_bits_l[len1]; - memset(a_bits_l, 0U, len1 * sizeof (uint64_t)); - memcpy(a_bits_l, table + (uint32_t)0U * len1, len1 * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i2 + (uint32_t)1U)); - uint64_t *res_j = table + (i2 + (uint32_t)1U) * len1; - { - uint32_t i; - for (i = (uint32_t)0U; i < len1; i++) - { - uint64_t *os = a_bits_l; - uint64_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x; - } - }); - { - uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, - ctx_n, - k1.mu, - resM, - a_bits_l, - resM); - } - } - } - } - } - } - } - } - } - } - } - } -} - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - exp_consttime function for constant-time variant. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. 
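The constant-time branch of exp_consttime above never branches on exponent bits: in place of an `if`, it uses the masked-XOR conditional swap visible in its inner loop (`dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aMc[i])`). A minimal standalone sketch of that idiom, with illustrative names:

static void ct_cswap64(uint64_t bit, uint64_t *a, uint64_t *b, uint32_t len)
{
  /* mask is all ones when bit == 1, all zeros when bit == 0 */
  uint64_t mask = (uint64_t)0U - bit;
  uint32_t i;
  for (i = (uint32_t)0U; i < len; i++)
  {
    uint64_t dummy = mask & (a[i] ^ b[i]);
    a[i] = a[i] ^ dummy;
    b[i] = b[i] ^ dummy;
  }
}

Whether or not the swap takes effect, the same sequence of loads, XORs, and stores executes, so neither the timing nor the memory access pattern depends on the secret bit.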
- • b < pow2 bBits -*/ -void -Hacl_GenericField64_exp_vartime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint32_t bBits, - uint64_t *b, - uint64_t *resM -) -{ - uint32_t len1 = Hacl_GenericField64_field_get_len(k); - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - KRML_CHECK_SIZE(sizeof (uint64_t), k1.len); - { - uint64_t aMc[k1.len]; - memset(aMc, 0U, k1.len * sizeof (uint64_t)); - memcpy(aMc, aM, k1.len * sizeof (uint64_t)); - if (bBits < (uint32_t)200U) - { - KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); - { - uint64_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits; i++) - { - uint32_t i1 = i / (uint32_t)64U; - uint32_t j = i % (uint32_t)64U; - uint64_t tmp = b[i1]; - uint64_t bit = tmp >> j & (uint64_t)1U; - if (!(bit == (uint64_t)0U)) - { - uint64_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, resM, aMc, resM); - } - { - uint64_t *ctx_n0 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n0, k1.mu, aMc, aMc); - } - } - } - } - } - } - else - { - uint32_t bLen; - if (bBits == (uint32_t)0U) - { - bLen = (uint32_t)1U; - } - else - { - bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - } - KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1); - { - uint64_t ctx[len1 + len1]; - memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t)); - memcpy(ctx, k1.n, len1 * sizeof (uint64_t)); - memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1); - { - uint64_t table[(uint32_t)16U * len1]; - memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t tmp[len1]; - memset(tmp, 0U, len1 * sizeof (uint64_t)); - { - uint64_t *t0 = table; - uint64_t *t1 = table + len1; - uint64_t *ctx_n0 = ctx; - uint64_t *ctx_r20 = ctx + len1; - Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0); - memcpy(t1, aMc, len1 * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * len1; - uint64_t *ctx_n1 = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, - tmp, - len1 * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1; - uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, - tmp, - len1 * sizeof (uint64_t));); - if (bBits % (uint32_t)4U != (uint32_t)0U) - { - uint64_t mask_l = (uint64_t)15U; - uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U / (uint32_t)64U; - uint32_t j = bBits / (uint32_t)4U * (uint32_t)4U % (uint32_t)64U; - uint64_t p1 = b[i] >> j; - uint64_t ite; - if (i + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_c = ite & mask_l; - uint32_t bits_l32 = (uint32_t)bits_c; - uint64_t *a_bits_l = table + bits_l32 * len1; - memcpy(resM, a_bits_l, len1 * sizeof (uint64_t)); - } - } - else - { - uint64_t *ctx_n = ctx; - uint64_t *ctx_r2 = ctx + len1; - 
Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM); - } - { - uint32_t i; - for (i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM);); - { - uint32_t bk = bBits - bBits % (uint32_t)4U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = b[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < bLen && (uint32_t)0U < j) - { - ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t a_bits_l[len1]; - memset(a_bits_l, 0U, len1 * sizeof (uint64_t)); - { - uint32_t bits_l32 = (uint32_t)bits_l; - uint64_t *a_bits_l1 = table + bits_l32 * len1; - memcpy(a_bits_l, a_bits_l1, len1 * sizeof (uint64_t)); - { - uint64_t *ctx_n = ctx; - Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, - ctx_n, - k1.mu, - resM, - a_bits_l, - resM); - } - } - } - } - } - } - } - } - } - } - } - } - } -} - -/** -Write `aM ^ (-1) mod n` in `aInvM`. - - The argument aM and the outparam aInvM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < aM -*/ -void -Hacl_GenericField64_inverse( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *aInvM -) -{ - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k; - uint32_t len1 = k1.len; - KRML_CHECK_SIZE(sizeof (uint64_t), len1); - { - uint64_t n2[len1]; - memset(n2, 0U, len1 * sizeof (uint64_t)); - { - uint64_t - c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2); - uint64_t c1; - if ((uint32_t)1U < len1) - { - uint64_t *a1 = k1.n + (uint32_t)1U; - uint64_t *res1 = n2 + (uint32_t)1U; - uint64_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++) - { - uint64_t t1 = a1[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0); - { - uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1); - { - uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2); - { - uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for - (i - = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U; - i - < len1 - (uint32_t)1U; - i++) - { - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i); - } - } - { - uint64_t c10 = c; - c1 = c10; - } - } - else - { - c1 = c0; - } - Hacl_GenericField64_exp_vartime(k, aM, k1.len * (uint32_t)64U, n2, aInvM); - } - } -} - diff --git a/dist/c89-compatible/Hacl_GenericField64.h b/dist/c89-compatible/Hacl_GenericField64.h 
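Taken together, the functions above form a small Montgomery-domain field API: initialize a context for an odd modulus, move operands into Montgomery form, compute, and convert back. A hypothetical end-to-end use for 4-limb (256-bit) operands; the wrapper name, sizes, and buffers are illustrative, not from the library:

#include "Hacl_GenericField64.h"

/* Sketch: res = a ^ b mod n via the GenericField64 API.
   Assumes n is odd and > 1 (checked below), a < n, and b < 2^256.
   All arrays are 4 limbs of uint64_t, i.e. 256 bits. */
void modexp_256(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
{
  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k;
  uint64_t aM[4U] = { 0U };
  uint64_t resM[4U] = { 0U };
  if (!Hacl_GenericField64_field_modulus_check((uint32_t)4U, n))
  {
    return;
  }
  k = Hacl_GenericField64_field_init((uint32_t)4U, n);
  Hacl_GenericField64_to_field(k, a, aM);    /* a -> a * R mod n */
  Hacl_GenericField64_exp_consttime(k, aM, (uint32_t)256U, b, resM);
  Hacl_GenericField64_from_field(k, resM, res);  /* back out of Montgomery form */
  Hacl_GenericField64_field_free(k);
}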
deleted file mode 100644 index 5fa0bcd2c2..0000000000 --- a/dist/c89-compatible/Hacl_GenericField64.h +++ /dev/null @@ -1,270 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_GenericField64_H -#define __Hacl_GenericField64_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Bignum_Base.h" -#include "Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -typedef Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_GenericField64_pbn_mont_ctx_u64; - -/******************************************************************************* - -A verified field arithmetic library. - -This is a 64-bit optimized version, where bignums are represented as an array -of `len` unsigned 64-bit integers, i.e. uint64_t[len]. - -All the arithmetic operations are performed in the Montgomery domain. - -All the functions below preserve the following invariant for a bignum `aM` in -Montgomery form. - • aM < n - -*******************************************************************************/ - - -/** -Check whether this library will work for a modulus `n`. - - The function returns false if any of the following preconditions are violated, - true otherwise. - • n % 2 = 1 - • 1 < n -*/ -bool Hacl_GenericField64_field_modulus_check(uint32_t len, uint64_t *n); - -/** -Heap-allocate and initialize a montgomery context. - - The argument n is meant to be `len` limbs in size, i.e. uint64_t[len]. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n % 2 = 1 - • 1 < n - - The caller will need to call Hacl_GenericField64_field_free on the return value - to avoid memory leaks. -*/ -Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 -*Hacl_GenericField64_field_init(uint32_t len, uint64_t *n); - -/** -Deallocate the memory previously allocated by Hacl_GenericField64_field_init. - - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void Hacl_GenericField64_field_free(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k); - -/** -Return the size of a modulus `n` in limbs. - - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. 
-*/ -uint32_t Hacl_GenericField64_field_get_len(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k); - -/** -Convert a bignum from the regular representation to the Montgomery representation. - - Write `a * R mod n` in `aM`. - - The argument a and the outparam aM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_to_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *a, - uint64_t *aM -); - -/** -Convert a result back from the Montgomery representation to the regular representation. - - Write `aM / R mod n` in `a`, i.e. - Hacl_GenericField64_from_field(k, Hacl_GenericField64_to_field(k, a)) == a % n - - The argument aM and the outparam a are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_from_field( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *a -); - -/** -Write `aM + bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_add( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *bM, - uint64_t *cM -); - -/** -Write `aM - bM mod n` to `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_sub( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *bM, - uint64_t *cM -); - -/** -Write `aM * bM mod n` in `cM`. - - The arguments aM, bM, and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_mul( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *bM, - uint64_t *cM -); - -/** -Write `aM * aM mod n` in `cM`. - - The argument aM and the outparam cM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void -Hacl_GenericField64_sqr( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *cM -); - -/** -Convert a bignum `one` to its Montgomery representation. - - The outparam oneM is meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. -*/ -void Hacl_GenericField64_one(Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, uint64_t *oneM); - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - This function is constant-time over its argument b, at the cost of a slower - execution time than exp_vartime. 
- - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • b < pow2 bBits -*/ -void -Hacl_GenericField64_exp_consttime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint32_t bBits, - uint64_t *b, - uint64_t *resM -); - -/** -Write `aM ^ b mod n` in `resM`. - - The argument aM and the outparam resM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. - - The argument b is a bignum of any size, and bBits is an upper bound on the - number of significant bits of b. A tighter bound results in faster execution - time. When in doubt, the number of bits for the bignum size is always a safe - default, e.g. if b is a 256-bit bignum, bBits should be 256. - - The function is *NOT* constant-time on the argument b. See the - exp_consttime function for constant-time variant. - - Before calling this function, the caller will need to ensure that the following - precondition is observed. - • b < pow2 bBits -*/ -void -Hacl_GenericField64_exp_vartime( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint32_t bBits, - uint64_t *b, - uint64_t *resM -); - -/** -Write `aM ^ (-1) mod n` in `aInvM`. - - The argument aM and the outparam aInvM are meant to be `len` limbs in size, i.e. uint64_t[len]. - The argument k is a montgomery context obtained through Hacl_GenericField64_field_init. - - Before calling this function, the caller will need to ensure that the following - preconditions are observed. - • n is a prime - • 0 < aM -*/ -void -Hacl_GenericField64_inverse( - Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k, - uint64_t *aM, - uint64_t *aInvM -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_GenericField64_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HKDF.c b/dist/c89-compatible/Hacl_HKDF.c deleted file mode 100644 index 14a5128853..0000000000 --- a/dist/c89-compatible/Hacl_HKDF.c +++ /dev/null @@ -1,308 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
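The Hacl_HKDF.c implementation that follows is RFC 5869 extract-then-expand: extract is a single HMAC invocation (PRK = HMAC(salt, IKM)), and expand iterates T(i) = HMAC(PRK, T(i-1) | info | i) with a one-byte counter, concatenating the T(i) until `len` bytes of output keying material are produced. Because the counter is a single byte starting at 1, RFC 5869 caps the output at 255 hash-lengths; the code relies on the caller respecting that bound. An illustrative caller-side check (the helper name is ours, not the library's):

static int hkdf_len_ok(uint32_t len, uint32_t hash_len)
{
  /* RFC 5869: L <= 255 * HashLen, since the block counter is one byte. */
  return len <= (uint32_t)255U * hash_len;
}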
- */ - - -#include "Hacl_HKDF.h" - - - -void -Hacl_HKDF_expand_sha2_256( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)32U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -Hacl_HKDF_extract_sha2_256( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - Hacl_HMAC_compute_sha2_256(prk, salt, saltlen, ikm, ikmlen); -} - -void -Hacl_HKDF_expand_sha2_512( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)64U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -Hacl_HKDF_extract_sha2_512( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - Hacl_HMAC_compute_sha2_512(prk, salt, saltlen, ikm, ikmlen); -} - -void -Hacl_HKDF_expand_blake2s_32( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)32U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof 
(uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -Hacl_HKDF_extract_blake2s_32( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - Hacl_HMAC_compute_blake2s_32(prk, salt, saltlen, ikm, ikmlen); -} - -void -Hacl_HKDF_expand_blake2b_32( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)64U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -Hacl_HKDF_extract_blake2b_32( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - Hacl_HMAC_compute_blake2b_32(prk, salt, saltlen, ikm, ikmlen); -} - diff --git a/dist/c89-compatible/Hacl_HKDF.h b/dist/c89-compatible/Hacl_HKDF.h deleted file mode 100644 index 511562583b..0000000000 --- a/dist/c89-compatible/Hacl_HKDF.h +++ /dev/null @@ -1,121 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * 
copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HKDF_H -#define __Hacl_HKDF_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_HMAC.h" -#include "evercrypt_targetconfig.h" -void -Hacl_HKDF_expand_sha2_256( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -); - -void -Hacl_HKDF_extract_sha2_256( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -); - -void -Hacl_HKDF_expand_sha2_512( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -); - -void -Hacl_HKDF_extract_sha2_512( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -); - -void -Hacl_HKDF_expand_blake2s_32( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -); - -void -Hacl_HKDF_extract_blake2s_32( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -); - -void -Hacl_HKDF_expand_blake2b_32( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -); - -void -Hacl_HKDF_extract_blake2b_32( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HKDF_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HKDF_Blake2b_256.c b/dist/c89-compatible/Hacl_HKDF_Blake2b_256.c deleted file mode 100644 index 9f61ed4374..0000000000 --- a/dist/c89-compatible/Hacl_HKDF_Blake2b_256.c +++ /dev/null @@ -1,114 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
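Given the Hacl_HKDF.h declarations above, a typical derivation with SHA2-256 first extracts a PRK from the input keying material and then expands it to the requested length. A minimal sketch (the wrapper name, buffers, and lengths are illustrative):

#include "Hacl_HKDF.h"

void derive_keys(uint8_t *salt, uint32_t saltlen,
                 uint8_t *ikm, uint32_t ikmlen,
                 uint8_t *info, uint32_t infolen,
                 uint8_t *okm, uint32_t okmlen)
{
  uint8_t prk[32U]; /* SHA2-256 digest size */
  Hacl_HKDF_extract_sha2_256(prk, salt, saltlen, ikm, ikmlen);
  /* okmlen must not exceed 255 * 32 bytes, per RFC 5869. */
  Hacl_HKDF_expand_sha2_256(okm, prk, (uint32_t)32U, info, infolen, okmlen);
}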
- */ - - -#include "Hacl_HKDF_Blake2b_256.h" - - - -void -Hacl_HKDF_Blake2b_256_expand_blake2b_256( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)64U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, - prk, - prklen, - text0, - infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, - prk, - prklen, - text, - tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, - prk, - prklen, - text0, - infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, - prk, - prklen, - text, - tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -Hacl_HKDF_Blake2b_256_extract_blake2b_256( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - Hacl_HMAC_Blake2b_256_compute_blake2b_256(prk, salt, saltlen, ikm, ikmlen); -} - diff --git a/dist/c89-compatible/Hacl_HKDF_Blake2b_256.h b/dist/c89-compatible/Hacl_HKDF_Blake2b_256.h deleted file mode 100644 index 3805ccefaf..0000000000 --- a/dist/c89-compatible/Hacl_HKDF_Blake2b_256.h +++ /dev/null @@ -1,64 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HKDF_Blake2b_256_H -#define __Hacl_HKDF_Blake2b_256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_HMAC_Blake2b_256.h" -#include "evercrypt_targetconfig.h" -void -Hacl_HKDF_Blake2b_256_expand_blake2b_256( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -); - -void -Hacl_HKDF_Blake2b_256_extract_blake2b_256( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HKDF_Blake2b_256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HKDF_Blake2s_128.c b/dist/c89-compatible/Hacl_HKDF_Blake2s_128.c deleted file mode 100644 index 97c34da3bf..0000000000 --- a/dist/c89-compatible/Hacl_HKDF_Blake2s_128.c +++ /dev/null @@ -1,114 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
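Hacl_HKDF_Blake2b_256 (above) and Hacl_HKDF_Blake2s_128 (below) are vector-accelerated counterparts of the scalar *_blake2b_32 / *_blake2s_32 functions in Hacl_HKDF.h; in HACL* naming, the numeric suffix is the vector width in bits. Picking the fast variant safely requires a CPU-feature probe. A hypothetical dispatcher, assuming the EverCrypt_AutoConfig2_has_avx2 predicate from this codebase (the wrapper name is ours):

#include "Hacl_HKDF.h"
#include "Hacl_HKDF_Blake2b_256.h"
#include "EverCrypt_AutoConfig2.h"

void hkdf_extract_blake2b(uint8_t *prk, uint8_t *salt, uint32_t saltlen,
                          uint8_t *ikm, uint32_t ikmlen)
{
  /* Assumes EverCrypt_AutoConfig2_init() has already populated the flags. */
  if (EverCrypt_AutoConfig2_has_avx2())
  {
    Hacl_HKDF_Blake2b_256_extract_blake2b_256(prk, salt, saltlen, ikm, ikmlen);
  }
  else
  {
    Hacl_HKDF_extract_blake2b_32(prk, salt, saltlen, ikm, ikmlen);
  }
}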
- */ - - -#include "Hacl_HKDF_Blake2s_128.h" - - - -void -Hacl_HKDF_Blake2s_128_expand_blake2s_128( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -) -{ - uint32_t tlen = (uint32_t)32U; - uint32_t n = len / tlen; - uint8_t *output = okm; - KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U); - { - uint8_t text[tlen + infolen + (uint32_t)1U]; - memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t)); - { - uint8_t *text0 = text + tlen; - uint8_t *tag = text; - uint8_t *ctr = text + tlen + infolen; - memcpy(text + tlen, info, infolen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - ctr[0U] = (uint8_t)(i + (uint32_t)1U); - if (i == (uint32_t)0U) - { - Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, - prk, - prklen, - text0, - infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, - prk, - prklen, - text, - tlen + infolen + (uint32_t)1U); - } - memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t)); - } - } - if (n * tlen < len) - { - ctr[0U] = (uint8_t)(n + (uint32_t)1U); - if (n == (uint32_t)0U) - { - Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, - prk, - prklen, - text0, - infolen + (uint32_t)1U); - } - else - { - Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, - prk, - prklen, - text, - tlen + infolen + (uint32_t)1U); - } - { - uint8_t *block = okm + n * tlen; - memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t)); - } - } - } - } -} - -void -Hacl_HKDF_Blake2s_128_extract_blake2s_128( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -) -{ - Hacl_HMAC_Blake2s_128_compute_blake2s_128(prk, salt, saltlen, ikm, ikmlen); -} - diff --git a/dist/c89-compatible/Hacl_HKDF_Blake2s_128.h b/dist/c89-compatible/Hacl_HKDF_Blake2s_128.h deleted file mode 100644 index 4815a8d11d..0000000000 --- a/dist/c89-compatible/Hacl_HKDF_Blake2s_128.h +++ /dev/null @@ -1,64 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HKDF_Blake2s_128_H -#define __Hacl_HKDF_Blake2s_128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_HMAC_Blake2s_128.h" -#include "evercrypt_targetconfig.h" -void -Hacl_HKDF_Blake2s_128_expand_blake2s_128( - uint8_t *okm, - uint8_t *prk, - uint32_t prklen, - uint8_t *info, - uint32_t infolen, - uint32_t len -); - -void -Hacl_HKDF_Blake2s_128_extract_blake2s_128( - uint8_t *prk, - uint8_t *salt, - uint32_t saltlen, - uint8_t *ikm, - uint32_t ikmlen -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HKDF_Blake2s_128_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HMAC.c b/dist/c89-compatible/Hacl_HMAC.c deleted file mode 100644 index 0f4dd6c5dd..0000000000 --- a/dist/c89-compatible/Hacl_HMAC.c +++ /dev/null @@ -1,1153 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
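The Hacl_HMAC.c implementation below is the RFC 2104 construction, monomorphized per hash: the key is normalized to one block (copied if it fits, hashed otherwise), XORed with the 0x36/0x5c ipad/opad constants, and the tag is H((K' ^ opad) || H((K' ^ ipad) || data)). A minimal call against the SHA2-256 instance (the wrapper name and buffers are illustrative):

#include "Hacl_HMAC.h"

void mac_message(uint8_t *tag, uint8_t *key, uint32_t key_len,
                 uint8_t *msg, uint32_t msg_len)
{
  /* tag must point to at least 32 bytes, the SHA2-256 digest size. */
  Hacl_HMAC_compute_sha2_256(tag, key, key_len, msg, msg_len);
}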
- */ - - -#include "internal/Hacl_HMAC.h" - -#include "internal/Hacl_Hash_SHA2.h" -#include "internal/Hacl_Hash_SHA1.h" -#include "internal/Hacl_Hash_Blake2.h" - -void -Hacl_HMAC_legacy_compute_sha1( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)64U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)20U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)64U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_SHA1_legacy_hash(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t scrut0[5]; - uint32_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint32_t)0x67452301U; - scrut0[1U] = (uint32_t)0xefcdab89U; - scrut0[2U] = (uint32_t)0x98badcfeU; - scrut0[3U] = (uint32_t)0x10325476U; - scrut0[4U] = (uint32_t)0xc3d2e1f0U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA1_legacy_init(s); - if (data_len == (uint32_t)0U) - { - Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U); - } - else - { - Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)64U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t *full_blocks = data; - Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - Hacl_Hash_SHA1_legacy_update_last(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len, - rem2, - rem_len); - } - } - } - } - Hacl_Hash_Core_SHA1_legacy_finish(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA1_legacy_init(s); - Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U); - block_len = (uint32_t)64U; - n_blocks0 = (uint32_t)20U / block_len; - rem0 = (uint32_t)20U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)20U - n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; 
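- /* Outer pass of HMAC-SHA-1: the 20-byte inner digest was written over
-    the start of ipad (aliased as hash1 above) and is now absorbed after
-    the opad block, so dst ends up as
-    H((key_block ^ opad) || H((key_block ^ ipad) || data)). */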
- full_blocks0 = hash1; - Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - Hacl_Hash_SHA1_legacy_update_last(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len0, - rem1, - rem_len0); - Hacl_Hash_Core_SHA1_legacy_finish(s, dst); - } - } - } - } - } - } -} - -void -Hacl_HMAC_compute_sha2_256( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)64U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)32U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)64U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_SHA2_hash_256(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t scrut0[8]; - uint32_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint32_t)0x6a09e667U; - scrut0[1U] = (uint32_t)0xbb67ae85U; - scrut0[2U] = (uint32_t)0x3c6ef372U; - scrut0[3U] = (uint32_t)0xa54ff53aU; - scrut0[4U] = (uint32_t)0x510e527fU; - scrut0[5U] = (uint32_t)0x9b05688cU; - scrut0[6U] = (uint32_t)0x1f83d9abU; - scrut0[7U] = (uint32_t)0x5be0cd19U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA2_init_256(s); - if (data_len == (uint32_t)0U) - { - Hacl_Hash_SHA2_update_last_256(s, (uint64_t)0U, ipad, (uint32_t)64U); - } - else - { - Hacl_Hash_SHA2_update_multi_256(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)64U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t *full_blocks = data; - Hacl_Hash_SHA2_update_multi_256(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - Hacl_Hash_SHA2_update_last_256(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len, - rem2, - rem_len); - } - } - } - } - Hacl_Hash_Core_SHA2_finish_256(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA2_init_256(s); - Hacl_Hash_SHA2_update_multi_256(s, opad, (uint32_t)1U); - block_len = (uint32_t)64U; - n_blocks0 = (uint32_t)32U / block_len; - rem0 = (uint32_t)32U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)32U - 
n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; - full_blocks0 = hash1; - Hacl_Hash_SHA2_update_multi_256(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - Hacl_Hash_SHA2_update_last_256(s, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len0, - rem1, - rem_len0); - Hacl_Hash_Core_SHA2_finish_256(s, dst); - } - } - } - } - } - } -} - -void -Hacl_HMAC_compute_sha2_384( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)128U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)48U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)128U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_SHA2_hash_384(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint64_t scrut0[8]; - uint64_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint64_t)0xcbbb9d5dc1059ed8U; - scrut0[1U] = (uint64_t)0x629a292a367cd507U; - scrut0[2U] = (uint64_t)0x9159015a3070dd17U; - scrut0[3U] = (uint64_t)0x152fecd8f70e5939U; - scrut0[4U] = (uint64_t)0x67332667ffc00b31U; - scrut0[5U] = (uint64_t)0x8eb44a8768581511U; - scrut0[6U] = (uint64_t)0xdb0c2e0d64f98fa7U; - scrut0[7U] = (uint64_t)0x47b5481dbefa4fa4U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA2_init_384(s); - if (data_len == (uint32_t)0U) - { - Hacl_Hash_SHA2_update_last_384(s, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - ipad, - (uint32_t)128U); - } - else - { - Hacl_Hash_SHA2_update_multi_384(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)128U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t *full_blocks = data; - Hacl_Hash_SHA2_update_multi_384(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - Hacl_Hash_SHA2_update_last_384(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - rem2, - rem_len); - } - } - } - } - 
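- /* The inner SHA-384 hash is finished below. Note the running length is
-    carried as a 128-bit value (the FStar_UInt128_add on uint64-to-uint128
-    conversions above): SHA-384/512 encode the message length as a 128-bit
-    big-endian field in the final padding block, so these update functions
-    take uint128 counters where the SHA-1/SHA-256 paths used uint64_t. */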
Hacl_Hash_Core_SHA2_finish_384(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA2_init_384(s); - Hacl_Hash_SHA2_update_multi_384(s, opad, (uint32_t)1U); - block_len = (uint32_t)128U; - n_blocks0 = (uint32_t)48U / block_len; - rem0 = (uint32_t)48U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)48U - n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; - full_blocks0 = hash1; - Hacl_Hash_SHA2_update_multi_384(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - Hacl_Hash_SHA2_update_last_384(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len0)), - rem1, - rem_len0); - Hacl_Hash_Core_SHA2_finish_384(s, dst); - } - } - } - } - } - } -} - -void -Hacl_HMAC_compute_sha2_512( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)128U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)64U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)128U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_SHA2_hash_512(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint64_t scrut0[8]; - uint64_t *s; - uint8_t *dst1; - uint8_t *hash1; - uint32_t block_len; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint8_t *rem1; - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - scrut0[0U] = (uint64_t)0x6a09e667f3bcc908U; - scrut0[1U] = (uint64_t)0xbb67ae8584caa73bU; - scrut0[2U] = (uint64_t)0x3c6ef372fe94f82bU; - scrut0[3U] = (uint64_t)0xa54ff53a5f1d36f1U; - scrut0[4U] = (uint64_t)0x510e527fade682d1U; - scrut0[5U] = (uint64_t)0x9b05688c2b3e6c1fU; - scrut0[6U] = (uint64_t)0x1f83d9abfb41bd6bU; - scrut0[7U] = (uint64_t)0x5be0cd19137e2179U; - s = scrut0; - dst1 = ipad; - Hacl_Hash_Core_SHA2_init_512(s); - if (data_len == (uint32_t)0U) - { - Hacl_Hash_SHA2_update_last_512(s, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - ipad, - (uint32_t)128U); - } - else - { - Hacl_Hash_SHA2_update_multi_512(s, ipad, (uint32_t)1U); - { - uint32_t block_len0 = (uint32_t)128U; - uint32_t n_blocks2 = data_len / block_len0; - uint32_t rem = data_len % block_len0; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len0; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; 
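- /* Non-aligned case: data_len is not a multiple of the 128-byte block, so
-    all n_blocks2 full blocks go to update_multi and the rem trailing bytes
-    become the final block for update_last. (The aligned case above instead
-    holds one full block back so update_last never sees an empty input.) */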
- lit.snd = rem; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len0; - uint8_t *full_blocks = data; - Hacl_Hash_SHA2_update_multi_512(s, full_blocks, n_blocks); - { - uint8_t *rem2 = data + full_blocks_len; - Hacl_Hash_SHA2_update_last_512(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - rem2, - rem_len); - } - } - } - } - Hacl_Hash_Core_SHA2_finish_512(s, dst1); - hash1 = ipad; - Hacl_Hash_Core_SHA2_init_512(s); - Hacl_Hash_SHA2_update_multi_512(s, opad, (uint32_t)1U); - block_len = (uint32_t)128U; - n_blocks0 = (uint32_t)64U / block_len; - rem0 = (uint32_t)64U % block_len; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)64U - n_blocks_ * block_len; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len; - full_blocks0 = hash1; - Hacl_Hash_SHA2_update_multi_512(s, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - Hacl_Hash_SHA2_update_last_512(s, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len0)), - rem1, - rem_len0); - Hacl_Hash_Core_SHA2_finish_512(s, dst); - } - } - } - } - } - } -} - -void -Hacl_HMAC_compute_blake2s_32( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)64U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)32U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)64U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_Blake2_hash_blake2s_32(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - { - uint32_t s0[16U] = { 0U }; - uint32_t *r0 = s0; - uint32_t *r1 = s0 + (uint32_t)4U; - uint32_t *r2 = s0 + (uint32_t)8U; - uint32_t *r3 = s0 + (uint32_t)12U; - uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U]; - uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U]; - uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U]; - uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U]; - uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U]; - uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U]; - uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U]; - uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U]; - uint32_t kk_shift_8; - uint32_t iv0_; - uint64_t es; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = (uint32_t)0U; - iv0_ = iv0 ^ 
((uint32_t)0x01010000U ^ (kk_shift_8 ^ (uint32_t)32U)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; - es = (uint64_t)0U; - { - K____uint32_t__uint64_t scrut0; - uint32_t *s; - uint8_t *dst1; - uint64_t ev0; - uint64_t ev10; - uint8_t *hash1; - uint64_t ev; - uint64_t ev11; - uint32_t block_len0; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint64_t ev2; - uint8_t *rem1; - uint64_t ev3; - uint64_t ev1; - scrut0.fst = s0; - scrut0.snd = es; - s = scrut0.fst; - dst1 = ipad; - ev0 = Hacl_Hash_Core_Blake2_init_blake2s_32(s); - if (data_len == (uint32_t)0U) - { - uint64_t - ev12 = - Hacl_Hash_Blake2_update_last_blake2s_32(s, - ev0, - (uint64_t)0U, - ipad, - (uint32_t)64U); - ev10 = ev12; - } - else - { - uint64_t - ev12 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev0, ipad, (uint32_t)1U); - uint32_t block_len = (uint32_t)64U; - uint32_t n_blocks2 = data_len / block_len; - uint32_t rem2 = data_len % block_len; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem2 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem2; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len; - uint8_t *full_blocks = data; - uint64_t - ev20 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev12, full_blocks, n_blocks); - uint8_t *rem = data + full_blocks_len; - uint64_t - ev30 = - Hacl_Hash_Blake2_update_last_blake2s_32(s, - ev20, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len, - rem, - rem_len); - ev10 = ev30; - } - } - Hacl_Hash_Core_Blake2_finish_blake2s_32(s, ev10, dst1); - hash1 = ipad; - ev = Hacl_Hash_Core_Blake2_init_blake2s_32(s); - ev11 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev, opad, (uint32_t)1U); - block_len0 = (uint32_t)64U; - n_blocks0 = (uint32_t)32U / block_len0; - rem0 = (uint32_t)32U % block_len0; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)32U - n_blocks_ * block_len0; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len0; - full_blocks0 = hash1; - ev2 = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev11, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - ev3 = - Hacl_Hash_Blake2_update_last_blake2s_32(s, - ev2, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len0, - rem1, - rem_len0); - ev1 = ev3; - Hacl_Hash_Core_Blake2_finish_blake2s_32(s, ev1, dst); - } - } - } - } - } - } - } -} - -void -Hacl_HMAC_compute_blake2b_32( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)128U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)64U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)128U) - { - memcpy(nkey, key, key_len * 
sizeof (uint8_t)); - } - else - { - Hacl_Hash_Blake2_hash_blake2b_32(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - { - uint64_t s0[16U] = { 0U }; - uint64_t *r0 = s0; - uint64_t *r1 = s0 + (uint32_t)4U; - uint64_t *r2 = s0 + (uint32_t)8U; - uint64_t *r3 = s0 + (uint32_t)12U; - uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U]; - uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U]; - uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U]; - uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U]; - uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U]; - uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U]; - uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U]; - uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U]; - uint64_t kk_shift_8; - uint64_t iv0_; - FStar_UInt128_uint128 es; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = (uint64_t)(uint32_t)0U << (uint32_t)8U; - iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)(uint32_t)64U)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; - es = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - { - K____uint64_t__FStar_UInt128_uint128 scrut0; - uint64_t *s; - uint8_t *dst1; - FStar_UInt128_uint128 ev0; - FStar_UInt128_uint128 ev10; - uint8_t *hash1; - FStar_UInt128_uint128 ev; - FStar_UInt128_uint128 ev11; - uint32_t block_len0; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - FStar_UInt128_uint128 ev2; - uint8_t *rem1; - FStar_UInt128_uint128 ev3; - FStar_UInt128_uint128 ev1; - scrut0.fst = s0; - scrut0.snd = es; - s = scrut0.fst; - dst1 = ipad; - ev0 = Hacl_Hash_Core_Blake2_init_blake2b_32(s); - if (data_len == (uint32_t)0U) - { - FStar_UInt128_uint128 - ev12 = - Hacl_Hash_Blake2_update_last_blake2b_32(s, - ev0, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - ipad, - (uint32_t)128U); - ev10 = ev12; - } - else - { - FStar_UInt128_uint128 - ev12 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev0, ipad, (uint32_t)1U); - uint32_t block_len = (uint32_t)128U; - uint32_t n_blocks2 = data_len / block_len; - uint32_t rem2 = data_len % block_len; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem2 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem2; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len; - uint8_t *full_blocks = data; - FStar_UInt128_uint128 - ev20 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev12, full_blocks, n_blocks); - uint8_t *rem = data + full_blocks_len; - FStar_UInt128_uint128 - ev30 = - 
Hacl_Hash_Blake2_update_last_blake2b_32(s, - ev20, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - rem, - rem_len); - ev10 = ev30; - } - } - Hacl_Hash_Core_Blake2_finish_blake2b_32(s, ev10, dst1); - hash1 = ipad; - ev = Hacl_Hash_Core_Blake2_init_blake2b_32(s); - ev11 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev, opad, (uint32_t)1U); - block_len0 = (uint32_t)128U; - n_blocks0 = (uint32_t)64U / block_len0; - rem0 = (uint32_t)64U % block_len0; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)64U - n_blocks_ * block_len0; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len0; - full_blocks0 = hash1; - ev2 = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev11, full_blocks0, n_blocks1); - rem1 = hash1 + full_blocks_len0; - ev3 = - Hacl_Hash_Blake2_update_last_blake2b_32(s, - ev2, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len0)), - rem1, - rem_len0); - ev1 = ev3; - Hacl_Hash_Core_Blake2_finish_blake2b_32(s, ev1, dst); - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_HMAC.h b/dist/c89-compatible/Hacl_HMAC.h deleted file mode 100644 index 5fb1f86a56..0000000000 --- a/dist/c89-compatible/Hacl_HMAC.h +++ /dev/null @@ -1,102 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-
-#ifndef __Hacl_HMAC_H
-#define __Hacl_HMAC_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "Hacl_Impl_Blake2_Constants.h"
-#include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_SHA1.h"
-#include "evercrypt_targetconfig.h"
-void
-Hacl_HMAC_legacy_compute_sha1(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-void
-Hacl_HMAC_compute_sha2_256(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-void
-Hacl_HMAC_compute_sha2_384(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-void
-Hacl_HMAC_compute_sha2_512(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-void
-Hacl_HMAC_compute_blake2s_32(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-void
-Hacl_HMAC_compute_blake2b_32(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_HMAC_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_HMAC_Blake2b_256.c b/dist/c89-compatible/Hacl_HMAC_Blake2b_256.c
deleted file mode 100644
index bccc12b7fe..0000000000
--- a/dist/c89-compatible/Hacl_HMAC_Blake2b_256.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "Hacl_HMAC_Blake2b_256.h" - -#include "internal/Hacl_Hash_Blake2b_256.h" -#include "internal/Hacl_Hash_Blake2.h" - -typedef struct ___Lib_IntVector_Intrinsics_vec256__FStar_UInt128_uint128_s -{ - Lib_IntVector_Intrinsics_vec256 *fst; - FStar_UInt128_uint128 snd; -} -___Lib_IntVector_Intrinsics_vec256__FStar_UInt128_uint128; - -void -Hacl_HMAC_Blake2b_256_compute_blake2b_256( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)128U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)128U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)64U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)128U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_Blake2b_256_hash_blake2b_256(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - { - KRML_PRE_ALIGN(32) - Lib_IntVector_Intrinsics_vec256 - s0[4U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 *r0 = s0; - Lib_IntVector_Intrinsics_vec256 *r1 = s0 + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = s0 + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = s0 + (uint32_t)3U; - uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U]; - uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U]; - uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U]; - uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U]; - uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U]; - uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U]; - uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U]; - uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U]; - uint64_t kk_shift_8; - uint64_t iv0_; - FStar_UInt128_uint128 es; - r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - kk_shift_8 = (uint64_t)(uint32_t)0U << (uint32_t)8U; - iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)(uint32_t)64U)); - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - es = FStar_UInt128_uint64_to_uint128((uint64_t)0U); - { - ___Lib_IntVector_Intrinsics_vec256__FStar_UInt128_uint128 scrut0; - Lib_IntVector_Intrinsics_vec256 *s; - uint8_t *dst1; - FStar_UInt128_uint128 ev0; - FStar_UInt128_uint128 ev10; - uint8_t *hash1; - FStar_UInt128_uint128 ev; - FStar_UInt128_uint128 ev11; - uint32_t block_len0; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - FStar_UInt128_uint128 ev2; - uint8_t *rem1; - FStar_UInt128_uint128 ev3; - FStar_UInt128_uint128 ev1; - scrut0.fst = s0; - scrut0.snd = es; - s = scrut0.fst; - dst1 = ipad; - ev0 = Hacl_Hash_Blake2b_256_init_blake2b_256(s); - if (data_len == (uint32_t)0U) - { - FStar_UInt128_uint128 - ev12 = - 
Hacl_Hash_Blake2b_256_update_last_blake2b_256(s, - ev0, - FStar_UInt128_uint64_to_uint128((uint64_t)0U), - ipad, - (uint32_t)128U); - ev10 = ev12; - } - else - { - FStar_UInt128_uint128 - ev12 = Hacl_Hash_Blake2b_256_update_multi_blake2b_256(s, ev0, ipad, (uint32_t)1U); - uint32_t block_len = (uint32_t)128U; - uint32_t n_blocks2 = data_len / block_len; - uint32_t rem2 = data_len % block_len; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem2 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem2; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len; - uint8_t *full_blocks = data; - FStar_UInt128_uint128 - ev20 = - Hacl_Hash_Blake2b_256_update_multi_blake2b_256(s, - ev12, - full_blocks, - n_blocks); - uint8_t *rem = data + full_blocks_len; - FStar_UInt128_uint128 - ev30 = - Hacl_Hash_Blake2b_256_update_last_blake2b_256(s, - ev20, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)), - rem, - rem_len); - ev10 = ev30; - } - } - Hacl_Hash_Blake2b_256_finish_blake2b_256(s, ev10, dst1); - hash1 = ipad; - ev = Hacl_Hash_Blake2b_256_init_blake2b_256(s); - ev11 = Hacl_Hash_Blake2b_256_update_multi_blake2b_256(s, ev, opad, (uint32_t)1U); - block_len0 = (uint32_t)128U; - n_blocks0 = (uint32_t)64U / block_len0; - rem0 = (uint32_t)64U % block_len0; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)64U - n_blocks_ * block_len0; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len0; - full_blocks0 = hash1; - ev2 = - Hacl_Hash_Blake2b_256_update_multi_blake2b_256(s, - ev11, - full_blocks0, - n_blocks1); - rem1 = hash1 + full_blocks_len0; - ev3 = - Hacl_Hash_Blake2b_256_update_last_blake2b_256(s, - ev2, - FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U), - FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len0)), - rem1, - rem_len0); - ev1 = ev3; - Hacl_Hash_Blake2b_256_finish_blake2b_256(s, ev1, dst); - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_HMAC_Blake2b_256.h b/dist/c89-compatible/Hacl_HMAC_Blake2b_256.h deleted file mode 100644 index 4d7bfcacb1..0000000000 --- a/dist/c89-compatible/Hacl_HMAC_Blake2b_256.h +++ /dev/null @@ -1,55 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_HMAC_Blake2b_256_H
-#define __Hacl_HMAC_Blake2b_256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "Hacl_Impl_Blake2_Constants.h"
-#include "evercrypt_targetconfig.h"
-void
-Hacl_HMAC_Blake2b_256_compute_blake2b_256(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_HMAC_Blake2b_256_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_HMAC_Blake2s_128.c b/dist/c89-compatible/Hacl_HMAC_Blake2s_128.c
deleted file mode 100644
index 1c1ab5450a..0000000000
--- a/dist/c89-compatible/Hacl_HMAC_Blake2s_128.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "Hacl_HMAC_Blake2s_128.h" - -#include "internal/Hacl_Hash_Blake2s_128.h" -#include "internal/Hacl_Hash_Blake2.h" - -typedef struct ___Lib_IntVector_Intrinsics_vec128__uint64_t_s -{ - Lib_IntVector_Intrinsics_vec128 *fst; - uint64_t snd; -} -___Lib_IntVector_Intrinsics_vec128__uint64_t; - -void -Hacl_HMAC_Blake2s_128_compute_blake2s_128( - uint8_t *dst, - uint8_t *key, - uint32_t key_len, - uint8_t *data, - uint32_t data_len -) -{ - uint32_t l = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t key_block[l]; - memset(key_block, 0U, l * sizeof (uint8_t)); - { - uint32_t i0; - if (key_len <= (uint32_t)64U) - { - i0 = key_len; - } - else - { - i0 = (uint32_t)32U; - } - { - uint8_t *nkey = key_block; - if (key_len <= (uint32_t)64U) - { - memcpy(nkey, key, key_len * sizeof (uint8_t)); - } - else - { - Hacl_Hash_Blake2s_128_hash_blake2s_128(key, key_len, nkey); - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t ipad[l]; - memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = ipad[i]; - uint8_t yi = key_block[i]; - ipad[i] = xi ^ yi; - } - } - KRML_CHECK_SIZE(sizeof (uint8_t), l); - { - uint8_t opad[l]; - memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < l; i++) - { - uint8_t xi = opad[i]; - uint8_t yi = key_block[i]; - opad[i] = xi ^ yi; - } - } - { - KRML_PRE_ALIGN(16) - Lib_IntVector_Intrinsics_vec128 - s0[4U] KRML_POST_ALIGN(16) = { 0U }; - Lib_IntVector_Intrinsics_vec128 *r0 = s0; - Lib_IntVector_Intrinsics_vec128 *r1 = s0 + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r2 = s0 + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r3 = s0 + (uint32_t)3U; - uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U]; - uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U]; - uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U]; - uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U]; - uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U]; - uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U]; - uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U]; - uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U]; - uint32_t kk_shift_8; - uint32_t iv0_; - uint64_t es; - r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); - kk_shift_8 = (uint32_t)0U; - iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ (uint32_t)32U)); - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); - es = (uint64_t)0U; - { - ___Lib_IntVector_Intrinsics_vec128__uint64_t scrut0; - Lib_IntVector_Intrinsics_vec128 *s; - uint8_t *dst1; - uint64_t ev0; - uint64_t ev10; - uint8_t *hash1; - uint64_t ev; - uint64_t ev11; - uint32_t block_len0; - uint32_t n_blocks0; - uint32_t rem0; - K___uint32_t_uint32_t scrut1; - uint32_t n_blocks1; - uint32_t rem_len0; - uint32_t full_blocks_len0; - uint8_t *full_blocks0; - uint64_t ev2; - uint8_t *rem1; - uint64_t ev3; - uint64_t ev1; - scrut0.fst = s0; - scrut0.snd = es; - s = scrut0.fst; - dst1 = ipad; - ev0 = Hacl_Hash_Blake2s_128_init_blake2s_128(s); - if (data_len == (uint32_t)0U) - { - uint64_t - ev12 = - Hacl_Hash_Blake2s_128_update_last_blake2s_128(s, - ev0, - (uint64_t)0U, - ipad, - (uint32_t)64U); - ev10 = ev12; - } - else - { - uint64_t - ev12 = Hacl_Hash_Blake2s_128_update_multi_blake2s_128(s, ev0, ipad, (uint32_t)1U); - uint32_t 
block_len = (uint32_t)64U; - uint32_t n_blocks2 = data_len / block_len; - uint32_t rem2 = data_len % block_len; - K___uint32_t_uint32_t scrut; - if (n_blocks2 > (uint32_t)0U && rem2 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks2 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = data_len - n_blocks_ * block_len; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks2; - lit.snd = rem2; - scrut = lit; - } - { - uint32_t n_blocks = scrut.fst; - uint32_t rem_len = scrut.snd; - uint32_t full_blocks_len = n_blocks * block_len; - uint8_t *full_blocks = data; - uint64_t - ev20 = - Hacl_Hash_Blake2s_128_update_multi_blake2s_128(s, - ev12, - full_blocks, - n_blocks); - uint8_t *rem = data + full_blocks_len; - uint64_t - ev30 = - Hacl_Hash_Blake2s_128_update_last_blake2s_128(s, - ev20, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len, - rem, - rem_len); - ev10 = ev30; - } - } - Hacl_Hash_Blake2s_128_finish_blake2s_128(s, ev10, dst1); - hash1 = ipad; - ev = Hacl_Hash_Blake2s_128_init_blake2s_128(s); - ev11 = Hacl_Hash_Blake2s_128_update_multi_blake2s_128(s, ev, opad, (uint32_t)1U); - block_len0 = (uint32_t)64U; - n_blocks0 = (uint32_t)32U / block_len0; - rem0 = (uint32_t)32U % block_len0; - if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U) - { - uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U; - K___uint32_t_uint32_t lit; - lit.fst = n_blocks_; - lit.snd = (uint32_t)32U - n_blocks_ * block_len0; - scrut1 = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = n_blocks0; - lit.snd = rem0; - scrut1 = lit; - } - n_blocks1 = scrut1.fst; - rem_len0 = scrut1.snd; - full_blocks_len0 = n_blocks1 * block_len0; - full_blocks0 = hash1; - ev2 = - Hacl_Hash_Blake2s_128_update_multi_blake2s_128(s, - ev11, - full_blocks0, - n_blocks1); - rem1 = hash1 + full_blocks_len0; - ev3 = - Hacl_Hash_Blake2s_128_update_last_blake2s_128(s, - ev2, - (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len0, - rem1, - rem_len0); - ev1 = ev3; - Hacl_Hash_Blake2s_128_finish_blake2s_128(s, ev1, dst); - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_HMAC_Blake2s_128.h b/dist/c89-compatible/Hacl_HMAC_Blake2s_128.h deleted file mode 100644 index 90413b655f..0000000000 --- a/dist/c89-compatible/Hacl_HMAC_Blake2s_128.h +++ /dev/null @@ -1,54 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-
-#ifndef __Hacl_HMAC_Blake2s_128_H
-#define __Hacl_HMAC_Blake2s_128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Impl_Blake2_Constants.h"
-#include "evercrypt_targetconfig.h"
-void
-Hacl_HMAC_Blake2s_128_compute_blake2s_128(
- uint8_t *dst,
- uint8_t *key,
- uint32_t key_len,
- uint8_t *data,
- uint32_t data_len
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_HMAC_Blake2s_128_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_HMAC_DRBG.c b/dist/c89-compatible/Hacl_HMAC_DRBG.c
deleted file mode 100644
index 6aac55d3b4..0000000000
--- a/dist/c89-compatible/Hacl_HMAC_DRBG.c
+++ /dev/null
@@ -1,1337 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "Hacl_HMAC_DRBG.h" - - - -uint32_t Hacl_HMAC_DRBG_reseed_interval = (uint32_t)1024U; - -uint32_t Hacl_HMAC_DRBG_max_output_length = (uint32_t)65536U; - -uint32_t Hacl_HMAC_DRBG_max_length = (uint32_t)65536U; - -uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = (uint32_t)65536U; - -uint32_t Hacl_HMAC_DRBG_max_additional_input_length = (uint32_t)65536U; - -uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)16U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)32U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -bool -Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee) -{ - return true; -} - -Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a) -{ - uint8_t *k; - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t)); - k = buf; - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - k = buf; - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t)); - k = buf; - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - k = buf; - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - { - uint8_t *v; - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t)); - v = buf; - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - v = buf; - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t)); - v = buf; - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - v = buf; - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - { - uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t)); - ctr[0U] = (uint32_t)1U; - { - Hacl_HMAC_DRBG_state lit; - lit.k = k; - lit.v = v; - lit.reseed_counter = ctr; - return lit; - } - } - } -} - -void -Hacl_HMAC_DRBG_instantiate( - Spec_Hash_Definitions_hash_alg a, - Hacl_HMAC_DRBG_state st, - uint32_t entropy_input_len, - uint8_t *entropy_input, - uint32_t nonce_len, - uint8_t *nonce, - uint32_t personalization_string_len, - uint8_t *personalization_string -) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - { - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - 
memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - k = st.k; - v = st.v; - ctr = st.reseed_counter; - memset(k, 0U, (uint32_t)20U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)21U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - { - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - k = st.k; - v = st.v; - ctr = st.reseed_counter; - memset(k, 0U, (uint32_t)32U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - 
Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)33U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0); - Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - { - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - k = st.k; - v = st.v; - ctr = st.reseed_counter; - memset(k, 0U, (uint32_t)48U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)49U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0); - Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_0, 
(uint32_t)48U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - KRML_CHECK_SIZE(sizeof (uint8_t), - entropy_input_len + nonce_len + personalization_string_len); - { - uint8_t seed_material[entropy_input_len + nonce_len + personalization_string_len]; - memset(seed_material, - 0U, - (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t)); - { - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, nonce, nonce_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len + nonce_len, - personalization_string, - personalization_string_len * sizeof (uint8_t)); - k = st.k; - v = st.v; - ctr = st.reseed_counter; - memset(k, 0U, (uint32_t)64U * sizeof (uint8_t)); - memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t)); - ctr[0U] = (uint32_t)1U; - { - uint32_t - input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - uint32_t - input_len0 = - (uint32_t)65U - + entropy_input_len + nonce_len + personalization_string_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if - (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - seed_material, - (entropy_input_len + nonce_len + personalization_string_len) - * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0); - Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -void -Hacl_HMAC_DRBG_reseed( - Spec_Hash_Definitions_hash_alg a, - Hacl_HMAC_DRBG_state st, - uint32_t entropy_input_len, - uint8_t *entropy_input, - uint32_t additional_input_input_len, - uint8_t *additional_input_input -) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____0; - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input_input, - 
additional_input_input_len * sizeof (uint8_t)); - uu____0 = st; - k = uu____0.k; - v = uu____0.v; - ctr = uu____0.reseed_counter; - { - uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)21U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____1; - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input_input, - additional_input_input_len * sizeof (uint8_t)); - uu____1 = st; - k = uu____1.k; - v = uu____1.v; - ctr = uu____1.reseed_counter; - { - uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)33U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if (entropy_input_len + 
additional_input_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0); - Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____2; - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input_input, - additional_input_input_len * sizeof (uint8_t)); - uu____2 = st; - k = uu____2.k; - v = uu____2.v; - ctr = uu____2.reseed_counter; - { - uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)49U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0); - Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - KRML_CHECK_SIZE(sizeof (uint8_t), entropy_input_len + additional_input_input_len); - { - uint8_t seed_material[entropy_input_len + additional_input_input_len]; - memset(seed_material, - 0U, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - { - Hacl_HMAC_DRBG_state uu____3; - uint8_t *k; - uint8_t *v; - uint32_t *ctr; - memcpy(seed_material, entropy_input, entropy_input_len * sizeof (uint8_t)); - memcpy(seed_material + entropy_input_len, - additional_input_input, - additional_input_input_len * sizeof (uint8_t)); - uu____3 = st; - k = uu____3.k; - v = uu____3.v; - ctr = uu____3.reseed_counter; - { - uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t 
input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - uint32_t - input_len0 = (uint32_t)65U + entropy_input_len + additional_input_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if (entropy_input_len + additional_input_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - seed_material, - (entropy_input_len + additional_input_input_len) * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0); - Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - ctr[0U] = (uint32_t)1U; - } - } - } - } - } - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -bool -Hacl_HMAC_DRBG_generate( - Spec_Hash_Definitions_hash_alg a, - uint8_t *output, - Hacl_HMAC_DRBG_state st, - uint32_t n, - uint32_t additional_input_len, - uint8_t *additional_input -) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA1: - { - if (st.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - return false; - } - { - uint8_t *k = st.k; - uint8_t *v = st.v; - uint32_t *ctr = st.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = n / (uint32_t)20U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i 
< max; i++) - { - Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U); - memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)20U < n) - { - uint8_t *block = output1 + max * (uint32_t)20U; - Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U); - memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[20U] = (uint8_t)0U; - Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len); - Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)21U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)21U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[20U] = (uint8_t)1U; - Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0); - Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U); - memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t)); - } - } - } - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - return true; - } - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - if (st.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - return false; - } - { - uint8_t *k = st.k; - uint8_t *v = st.v; - uint32_t *ctr = st.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0); - Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = 
n / (uint32_t)32U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i < max; i++) - { - Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U); - memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)32U < n) - { - uint8_t *block = output1 + max * (uint32_t)32U; - Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U); - memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[32U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len); - Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)33U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)33U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[32U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0); - Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U); - memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - return true; - } - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - if (st.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - return false; - } - { - uint8_t *k = st.k; - uint8_t *v = st.v; - uint32_t *ctr = st.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0); - Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - 
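/* Annotation, not part of the generated file: the Hacl_HMAC_compute_sha2_384
   calls just above complete the second round of the SP 800-90A HMAC-DRBG
   update with a 48-byte (SHA-384) state: K <- HMAC(K, V || 0x01 || ai),
   then V <- HMAC(K, V); the first round, earlier in this block, used the
   0x00 separator byte. A minimal sketch of one round, assuming a
   hypothetical hmac_sha384(dst, key, key_len, msg, msg_len) helper:

     memcpy(buf, v, 48U);                             buf = V
     buf[48U] = sep;                                  sep is 0x00, then 0x01
     memcpy(buf + 49U, ai, ai_len);                   buf = V || sep || ai
     hmac_sha384(k_new, k, 48U, buf, 49U + ai_len);   K' = HMAC(K, V || sep || ai)
     hmac_sha384(v, k_new, 48U, v, 48U);              V  = HMAC(K', V)
     memcpy(k, k_new, 48U);                           K  = K'
*/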
} - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = n / (uint32_t)48U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i < max; i++) - { - Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U); - memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)48U < n) - { - uint8_t *block = output1 + max * (uint32_t)48U; - Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U); - memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[48U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len); - Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)49U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)49U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[48U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0); - Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U); - memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - return true; - } - } - } - } - } - } - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - if (st.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval) - { - return false; - } - { - uint8_t *k = st.k; - uint8_t *v = st.v; - uint32_t *ctr = st.reseed_counter; - if (additional_input_len > (uint32_t)0U) - { - uint32_t input_len = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0); - Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, 
v, (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - } - } - } - { - uint8_t *output1 = output; - uint32_t max = n / (uint32_t)64U; - uint8_t *out = output1; - { - uint32_t i; - for (i = (uint32_t)0U; i < max; i++) - { - Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U); - memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t)); - } - } - if (max * (uint32_t)64U < n) - { - uint8_t *block = output1 + max * (uint32_t)64U; - Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U); - memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t)); - } - { - uint32_t input_len = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len); - { - uint8_t input0[input_len]; - memset(input0, 0U, input_len * sizeof (uint8_t)); - { - uint8_t *k_ = input0; - memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input0 + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input0[64U] = (uint8_t)0U; - Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len); - Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - uint32_t input_len0 = (uint32_t)65U + additional_input_len; - KRML_CHECK_SIZE(sizeof (uint8_t), input_len0); - { - uint8_t input[input_len0]; - memset(input, 0U, input_len0 * sizeof (uint8_t)); - { - uint8_t *k_0 = input; - memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t)); - if (additional_input_len != (uint32_t)0U) - { - memcpy(input + (uint32_t)65U, - additional_input, - additional_input_len * sizeof (uint8_t)); - } - input[64U] = (uint8_t)1U; - Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0); - Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U); - memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - { - uint32_t old_ctr = ctr[0U]; - ctr[0U] = old_ctr + (uint32_t)1U; - return true; - } - } - } - } - } - } - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - diff --git a/dist/c89-compatible/Hacl_HMAC_DRBG.h b/dist/c89-compatible/Hacl_HMAC_DRBG.h deleted file mode 100644 index 28590930bd..0000000000 --- a/dist/c89-compatible/Hacl_HMAC_DRBG.h +++ /dev/null @@ -1,105 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HMAC_DRBG_H -#define __Hacl_HMAC_DRBG_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Spec.h" -#include "Hacl_HMAC.h" -#include "evercrypt_targetconfig.h" -typedef Spec_Hash_Definitions_hash_alg Hacl_HMAC_DRBG_supported_alg; - -extern uint32_t Hacl_HMAC_DRBG_reseed_interval; - -extern uint32_t Hacl_HMAC_DRBG_max_output_length; - -extern uint32_t Hacl_HMAC_DRBG_max_length; - -extern uint32_t Hacl_HMAC_DRBG_max_personalization_string_length; - -extern uint32_t Hacl_HMAC_DRBG_max_additional_input_length; - -uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a); - -typedef struct Hacl_HMAC_DRBG_state_s -{ - uint8_t *k; - uint8_t *v; - uint32_t *reseed_counter; -} -Hacl_HMAC_DRBG_state; - -bool -Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee); - -Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a); - -void -Hacl_HMAC_DRBG_instantiate( - Spec_Hash_Definitions_hash_alg a, - Hacl_HMAC_DRBG_state st, - uint32_t entropy_input_len, - uint8_t *entropy_input, - uint32_t nonce_len, - uint8_t *nonce, - uint32_t personalization_string_len, - uint8_t *personalization_string -); - -void -Hacl_HMAC_DRBG_reseed( - Spec_Hash_Definitions_hash_alg a, - Hacl_HMAC_DRBG_state st, - uint32_t entropy_input_len, - uint8_t *entropy_input, - uint32_t additional_input_input_len, - uint8_t *additional_input_input -); - -bool -Hacl_HMAC_DRBG_generate( - Spec_Hash_Definitions_hash_alg a, - uint8_t *output, - Hacl_HMAC_DRBG_state st, - uint32_t n, - uint32_t additional_input_len, - uint8_t *additional_input -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HMAC_DRBG_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA256.c b/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA256.c deleted file mode 100644 index 508c8908ce..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA256.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_Curve51_CP128_SHA256.h" - - - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - 
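/* Annotation, not part of the generated file: the bytes 0x48 0x50 0x4b 0x45
   0x2d 0x76 0x31 written above spell the ASCII label "HPKE-v1". The buffer
   assembled here is the labeled_info argument of RFC 9180 LabeledExpand,

     labeled_info = I2OSP(L, 2) || "HPKE-v1" || suite_id || label || info

   and store32_be followed by the short memcpy is how the generated code lays
   down the leading two-byte big-endian output length: store32_be writes L as
   four bytes at offset 0, then the memcpy moves the low-order two bytes to
   the front. An equivalent, more direct sketch (illustrative only):

     tmp[0U] = (uint8_t)(L >> 8U);      I2OSP(L, 2), high byte
     tmp[1U] = (uint8_t)L;              I2OSP(L, 2), low byte
     memcpy(tmp + 2U, "HPKE-v1", 7U);   protocol label
*/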
memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t 
tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - 
label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_51_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = 
(uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, 
(uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - 
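/* Annotation, not part of the generated file: together with the two byte
   stores that follow, this writes the ASCII label "HPKE-v1" (0x48 0x50 0x4b
   0x45 0x2d 0x76 0x31); the surrounding buffer is the RFC 9180 LabeledExpand
   input from which the receiver's base_nonce (o_ctx.ctx_nonce, 12 bytes) is
   derived at the end of the key schedule. */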
uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - 
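/* Annotation, not part of the generated file: this else branch is reached
   when Hacl_Chacha20Poly1305_128_aead_decrypt reports a Poly1305 tag
   mismatch; openBase then returns a nonzero error code and, unlike the
   success path above, never advances the sequence counter o_ctx.ctx_seq. */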
res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA256.h b/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA256.h deleted file mode 100644 index edfa5e3e87..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA256.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_Curve51_CP128_SHA256_H -#define __Hacl_HPKE_Curve51_CP128_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_51.h" -#include "Hacl_Chacha20Poly1305_128.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve51_CP128_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA512.c b/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA512.c deleted file mode 100644 index 11437f0c8d..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA512.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_Curve51_CP128_SHA512.h" - - - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = 
(uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * 
sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof 
(uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_51_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - 
(uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - 
(uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, 
(uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - 
uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA512.h b/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA512.h deleted file mode 100644 index 3ddaff4172..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP128_SHA512.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HPKE_Curve51_CP128_SHA512_H -#define __Hacl_HPKE_Curve51_CP128_SHA512_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_51.h" -#include "Hacl_Chacha20Poly1305_128.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve51_CP128_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve51_CP128_SHA512_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA256.c b/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA256.c deleted file mode 100644 index ca1f637f25..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA256.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "Hacl_HPKE_Curve51_CP256_SHA256.h" - - - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - 
Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - 
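/* Aside, not from the removed file: the byte-by-byte writes around this
 * point spell "HPKE-v1".  Per RFC 9180, every extract in the key schedule
 * hashes "HPKE-v1" || suite_id || label || ikm rather than the raw ikm;
 * here the label is "secret" (6 bytes) and the ikm is the empty psk, so
 * len2 = 7 + 10 + 6 + 0 = 23.  A minimal sketch of that concatenation,
 * with a hypothetical helper name (assumes <string.h>/<stdint.h>): */
static size_t
hpke_labeled_ikm(uint8_t *out, const uint8_t suite_id[10],
                 const uint8_t *label, size_t label_len,
                 const uint8_t *ikm, size_t ikm_len)
{
  size_t n = 0U;
  memcpy(out + n, "HPKE-v1", 7U); n += 7U;            /* version prefix */
  memcpy(out + n, suite_id, 10U); n += 10U;           /* ciphersuite id */
  memcpy(out + n, label, label_len); n += label_len;  /* e.g. "secret"  */
  if (ikm_len > 0U) { memcpy(out + n, ikm, ikm_len); n += ikm_len; }
  return n;                             /* == 17 + label_len + ikm_len */
}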
uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - 
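/* Aside, not from the removed file: the store32_be/memcpy pair that opens
 * every "labeled expand" buffer in this code is a two-byte big-endian
 * length store in disguise: store32_be writes the output length L as four
 * BE bytes, then the memcpy slides bytes 2..3 (the low 16 bits, i.e.
 * I2OSP(L, 2)) over bytes 0..1 before the rest of the buffer overwrites
 * byte 2 onward.  Equivalent direct form (sketch): */
static void
hpke_store_len16_be(uint8_t *out, uint32_t len)
{
  out[0U] = (uint8_t)(len >> 8U); /* I2OSP(len, 2), RFC 9180 labeled expand */
  out[1U] = (uint8_t)len;
}
/* The buffer just consumed above is I2OSP(12, 2) || "HPKE-v1" || suite_id
 * || "base_nonce" || key_schedule_context: 2 + 7 + 10 + 10 + 65 = 94 bytes
 * for this SHA-256 suite, whose context is 1 + 32 + 32 bytes. */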
o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_51_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - 
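/* Aside, not from the removed file: suite_id, assembled byte-by-byte a few
 * lines up, is "HPKE" || I2OSP(kem_id, 2) || I2OSP(kdf_id, 2) ||
 * I2OSP(aead_id, 2).  In this file kem_id = 0x0020 (DHKEM(X25519,
 * HKDF-SHA256)), kdf_id = 0x0001 (HKDF-SHA256) and aead_id = 0x0003
 * (ChaCha20-Poly1305); the SHA512 variants in this diff set kdf_id =
 * 0x0003 instead.  The same construction as a sketch: */
static void
hpke_suite_id(uint8_t out[10],
              uint16_t kem_id, uint16_t kdf_id, uint16_t aead_id)
{
  memcpy(out, "HPKE", 4U);
  out[4U] = (uint8_t)(kem_id >> 8U);  out[5U] = (uint8_t)kem_id;
  out[6U] = (uint8_t)(kdf_id >> 8U);  out[7U] = (uint8_t)kdf_id;
  out[8U] = (uint8_t)(aead_id >> 8U); out[9U] = (uint8_t)aead_id;
}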
uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA256.h b/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA256.h deleted file mode 
100644 index 6d9e9397ba..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA256.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_Curve51_CP256_SHA256_H -#define __Hacl_HPKE_Curve51_CP256_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_51.h" -#include "Hacl_Chacha20Poly1305_256.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve51_CP256_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA512.c b/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA512.c deleted file mode 100644 index 978ea1e9aa..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA512.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission
notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_Curve51_CP256_SHA512.h" - - - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - 
(uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + 
(uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - 
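/* Aside, not from the removed file: length bookkeeping for the SHA-512
 * suites.  The key-schedule context is 129 bytes (mode byte || 64-byte
 * psk_id_hash || 64-byte info_hash), so a labeled-expand input is
 * 2 + 7 + 10 + label_len + 129 bytes: 151 for the 3-byte labels "exp" and
 * "key" above, 158 for the 10-byte "base_nonce" here.  The same arithmetic
 * as C89-style compile-time checks (array size goes negative on failure): */
typedef char hpke_sha512_context_len_ok[(1 + 64 + 64 == 129) ? 1 : -1];
typedef char hpke_sha512_nonce_input_ok[(2 + 7 + 10 + 10 + 129 == 158) ? 1 : -1];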
uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_51_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - 
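/* Aside, not from the removed file: note the HKDF-SHA256 calls in this
 * otherwise SHA-512 file.  DHKEM(X25519, HKDF-SHA256) fixes its own KDF,
 * independent of the suite KDF.  The 91-byte buffer declared here is
 * I2OSP(32, 2) || "HPKE-v1" || "KEM" || I2OSP(0x0020, 2) ||
 * "shared_secret" || kem_context, with kem_context = enc || pkRm
 * (32 + 32 bytes), so 2 + 7 + 5 + 13 + 64 = 91.  Under RFC 9180 names the
 * flow is: dh = X25519(skR, enc); eae_prk = LabeledExtract("", "eae_prk",
 * dh); shared_secret = LabeledExpand(eae_prk, "shared_secret",
 * kem_context, 32).  Compile-time check of the arithmetic (sketch): */
typedef char dhkem_x25519_expand_input_ok[(2 + 7 + 5 + 13 + 64 == 91) ? 1 : -1];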
memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, - empty, - 
(uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - 
store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct 
+ ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA512.h b/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA512.h deleted file mode 100644 index 0d01a71e1d..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP256_SHA512.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HPKE_Curve51_CP256_SHA512_H -#define __Hacl_HPKE_Curve51_CP256_SHA512_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_51.h" -#include "Hacl_Chacha20Poly1305_256.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve51_CP256_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve51_CP256_SHA512_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA256.c b/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA256.c deleted file mode 100644 index c95a207b0f..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA256.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "Hacl_HPKE_Curve51_CP32_SHA256.h" - - - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - 
Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - 
uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - 
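Every tmp/tmp0/.../tmp4 buffer fed to the Hacl_HKDF_expand_* calls above is the labeled info input of RFC 9180's LabeledExpand: a 2-byte big-endian output length, the 7 ASCII bytes "HPKE-v1", the 10-byte suite_id, an ASCII label, and the key-schedule context. The hard-coded lengths check out against that layout: 87 = 2 + 7 + 10 + 3 + 65 for the 3-byte "exp" and "key" labels, and 94 = 2 + 7 + 10 + 10 + 65 for "base_nonce" in these SHA-256 suites (151 and 158 with the 129-byte SHA-512 context). A sketch of the assembly, where labeled_info is a hypothetical helper rather than part of the deleted API:

#include <stdint.h>
#include <string.h>

/* labeled_info = I2OSP(outlen, 2) || "HPKE-v1" || suite_id || label || context
   (RFC 9180, Section 4); returns the number of bytes written. */
static size_t labeled_info(uint8_t *out, uint16_t outlen,
                           const uint8_t *suite_id, size_t suite_id_len,
                           const char *label,
                           const uint8_t *context, size_t context_len)
{
  size_t label_len = strlen(label);
  size_t off = (size_t)0U;
  out[off++] = (uint8_t)(outlen >> 8);    /* 2-byte big-endian length prefix */
  out[off++] = (uint8_t)(outlen & 0xFFU);
  memcpy(out + off, "HPKE-v1", (size_t)7U); off += (size_t)7U;
  memcpy(out + off, suite_id, suite_id_len); off += suite_id_len;
  memcpy(out + off, label, label_len); off += label_len;
  memcpy(out + off, context, context_len); off += context_len;
  return off;                             /* 94 for "base_nonce" just above */
}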
o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_51_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - 
uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA256.h b/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA256.h deleted file mode 100644 
index bdb1330c67..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA256.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_Curve51_CP32_SHA256_H -#define __Hacl_HPKE_Curve51_CP32_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_51.h" -#include "Hacl_Chacha20Poly1305_32.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve51_CP32_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA512.c b/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA512.c deleted file mode 100644 index bde5e755f1..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA512.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be
included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_Curve51_CP32_SHA512.h" - - - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof 
(uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - 
o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = 
(uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_51_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_51_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_51_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * 
sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - 
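The two extract calls above are RFC 9180 LabeledExtract, whose labeled ikm is "HPKE-v1" || suite_id || label || ikm with no length prefix, hence len0 = 7 + 10 + 11 = 28 for the 11-byte "psk_id_hash" label and len1 = 26 + infolen for the 9-byte "info_hash" label. The o_context filled in next is key_schedule_context = mode || psk_id_hash || info_hash, where mode 0x00 is Base and each hash is 64 bytes under SHA-512, giving the 129-byte buffer (1 + 32 + 32 = 65 in the SHA-256 suites). A sketch of that assembly, assuming a hypothetical key_schedule_context helper:

#include <stdint.h>
#include <string.h>

/* key_schedule_context = mode || psk_id_hash || info_hash (RFC 9180,
   Section 5.1); hash_len is 64 for the SHA-512 suites and 32 for SHA-256. */
static void key_schedule_context(uint8_t *o_context, uint8_t mode,
                                 const uint8_t *psk_id_hash,
                                 const uint8_t *info_hash, size_t hash_len)
{
  o_context[0U] = mode;                          /* 0x00U: Base mode */
  memcpy(o_context + 1U, psk_id_hash, hash_len);
  memcpy(o_context + 1U + hash_len, info_hash, hash_len);
}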
o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - 
memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - 
if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA512.h b/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA512.h deleted file mode 100644 index 16b5a855b6..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve51_CP32_SHA512.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
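[Editor's note] In the sealBase/openBase bodies deleted above, the per-message AEAD nonce is the key-schedule base nonce XORed with the 64-bit sequence number, encoded big-endian into the last 8 bytes of a 12-byte block; afterwards the sequence number is incremented and the operation fails once it would pass 2^64 - 1. A minimal standalone sketch of that derivation, with hypothetical helper names (this is not part of the HACL* API):

#include <stdint.h>

/* Sketch: nonce = base_nonce XOR (0^4 || be64(seq)), as in the deleted
 * seal/open paths above. Hypothetical helper for illustration only. */
static void hpke_nonce(uint8_t nonce[12], const uint8_t base_nonce[12], uint64_t seq)
{
  uint8_t enc[12] = { 0 };
  int i;
  for (i = 0; i < 8; i++)            /* equivalent of store64_be(enc + 4, seq) */
    enc[4 + i] = (uint8_t)(seq >> (56 - 8 * i));
  for (i = 0; i < 12; i++)           /* XOR against the key-schedule base nonce */
    nonce[i] = enc[i] ^ base_nonce[i];
}

/* The callers above then guard the counter:
 * if (seq == UINT64_MAX) fail; else seq = seq + 1. */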
- */ - - -#ifndef __Hacl_HPKE_Curve51_CP32_SHA512_H -#define __Hacl_HPKE_Curve51_CP32_SHA512_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_51.h" -#include "Hacl_Chacha20Poly1305_32.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve51_CP32_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve51_CP32_SHA512_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA256.c b/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA256.c deleted file mode 100644 index c1a932da34..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA256.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
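[Editor's note] The header removed above declares the single-shot API for this ciphersuite. A hedged usage sketch follows, assuming skE holds a fresh X25519 ephemeral scalar and pkR/skR a recipient key pair (key generation not shown); the buffer sizes are inferred from the deleted bodies: o_enc receives the 32-byte encapsulated public key, and the ciphertext carries a 16-byte Poly1305 tag after plainlen bytes, so openBase is called with ctlen = plainlen + 16.

#include <stdint.h>
#include <string.h>
#include "Hacl_HPKE_Curve51_CP32_SHA512.h"

/* Sketch: single-shot seal then open; returns 0 on round-trip success. */
int hpke_roundtrip(uint8_t skE[32], uint8_t pkR[32], uint8_t skR[32])
{
  uint8_t info[4] = { 1, 2, 3, 4 };
  uint8_t aad[4] = { 9, 9, 9, 9 };
  uint8_t msg[16] = "0123456789abcde";
  uint8_t enc[32] = { 0 };        /* encapsulated ephemeral public key */
  uint8_t ct[16 + 16] = { 0 };    /* ciphertext || 16-byte Poly1305 tag */
  uint8_t pt[16] = { 0 };
  uint32_t r = Hacl_HPKE_Curve51_CP32_SHA512_sealBase(skE, pkR,
    4, info, 4, aad, 16, msg, enc, ct);
  if (r != 0) return 1;
  r = Hacl_HPKE_Curve51_CP32_SHA512_openBase(enc, skR,
    4, info, 4, aad, 32, ct, pt);
  if (r != 0) return 1;
  return memcmp(msg, pt, 16) == 0 ? 0 : 1;
}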
- */ - - -#include "Hacl_HPKE_Curve64_CP128_SHA256.h" - - - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - 
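[Editor's note] The FStar_UInt8_eq_mask fold near the top of the function above is a constant-time guard that the raw Curve25519 output is not all zeroes (the low-order-point check): each byte is compared against zero via a mask that is 0xFF on equality and 0x00 otherwise, and the masks are ANDed together, so no branch depends on secret data. A branch-free sketch of the same idea, with hypothetical helper names:

#include <stdint.h>

/* Mirrors FStar_UInt8_eq_mask: 0xFF when a == b, 0x00 otherwise,
 * computed without a secret-dependent branch. */
static uint8_t eq_mask(uint8_t a, uint8_t b)
{
  uint32_t x = (uint32_t)(a ^ b);   /* 0 iff a == b */
  uint32_t m = (x - 1u) >> 31;      /* borrow sets the top bit iff x == 0 */
  return (uint8_t)(0u - m);         /* 0xFF iff equal, else 0x00 */
}

static int dh_is_all_zero(const uint8_t dh[32])
{
  uint8_t acc = 0xFFu;
  int i;
  for (i = 0; i < 32; i++)
    acc &= eq_mask(dh[i], 0u);      /* stays 0xFF only if every byte is 0 */
  return acc == 0xFFu;              /* the generated code fails in that case */
}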
Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - 
uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - 
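[Editor's note] Each tmp buffer assembled above follows the RFC 9180 labeled-expand layout: a 2-byte big-endian output length, the ASCII protocol label "HPKE-v1" (the bytes 0x48 0x50 0x4b 0x45 0x2d 0x76 0x31 written out element by element), the 10-byte suite_id, a per-secret label such as "base_nonce", and the key-schedule context; e.g. len = 94 above is 2 + 7 + 10 + 10 + 65. The generated code emulates a 16-bit big-endian store with store32_be followed by a 2-byte memcpy; the sketch below writes the two bytes directly (hypothetical helper; out must be large enough):

#include <stdint.h>
#include <string.h>

/* Sketch: build be16(outlen) || "HPKE-v1" || suite_id || label || context,
 * the info layout each HKDF-Expand call above consumes. */
static size_t labeled_expand_info(uint8_t *out, uint16_t outlen,
                                  const uint8_t *suite_id, size_t suite_len,
                                  const uint8_t *label, size_t label_len,
                                  const uint8_t *context, size_t context_len)
{
  size_t off = 0;
  out[off++] = (uint8_t)(outlen >> 8);     /* 2-byte big-endian output length */
  out[off++] = (uint8_t)(outlen & 0xFFu);
  memcpy(out + off, "HPKE-v1", 7); off += 7;
  memcpy(out + off, suite_id, suite_len); off += suite_len;
  memcpy(out + off, label, label_len); off += label_len;
  memcpy(out + off, context, context_len); off += context_len;
  return off;
}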
o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_64_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - 
uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA256.h b/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA256.h deleted file mode 
100644 index 809544c9cd..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA256.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_Curve64_CP128_SHA256_H -#define __Hacl_HPKE_Curve64_CP128_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_64.h" -#include "Hacl_Chacha20Poly1305_128.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve64_CP128_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA512.c b/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA512.c deleted file mode 100644 index 97ba4043c6..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA512.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission 
notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_Curve64_CP128_SHA512.h" - - - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - 
(uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + 
(uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - 
uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_64_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - 
memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, - empty, - 
(uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - 
store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct 
+ ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA512.h b/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA512.h deleted file mode 100644 index ddc745ce9d..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP128_SHA512.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HPKE_Curve64_CP128_SHA512_H -#define __Hacl_HPKE_Curve64_CP128_SHA512_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_64.h" -#include "Hacl_Chacha20Poly1305_128.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve64_CP128_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve64_CP128_SHA512_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA256.c b/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA256.c deleted file mode 100644 index ecd9ea1519..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA256.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "Hacl_HPKE_Curve64_CP256_SHA256.h" - - - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - 
Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - 
uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - 
o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_64_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - 
uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA256.h b/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA256.h deleted file mode 
100644 index 69029789f8..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA256.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_Curve64_CP256_SHA256_H -#define __Hacl_HPKE_Curve64_CP256_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_64.h" -#include "Hacl_Chacha20Poly1305_256.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve64_CP256_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA512.c b/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA512.c deleted file mode 100644 index 6315185024..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA512.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission
notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_Curve64_CP256_SHA512.h" - - - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - 
(uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + 
(uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - 
uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_64_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - 
memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, - empty, - 
(uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - 
store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct 
+ ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA512.h b/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA512.h deleted file mode 100644 index aa5f41f335..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP256_SHA512.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HPKE_Curve64_CP256_SHA512_H -#define __Hacl_HPKE_Curve64_CP256_SHA512_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_64.h" -#include "Hacl_Chacha20Poly1305_256.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve64_CP256_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve64_CP256_SHA512_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA256.c b/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA256.c deleted file mode 100644 index b8aca8d525..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA256.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "Hacl_HPKE_Curve64_CP32_SHA256.h" - - - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - 
Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - 
uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - 
o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_64_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)1U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - 
uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)32U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + 
(uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA256.h b/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA256.h deleted file mode 100644 
index c18c600e39..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA256.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_Curve64_CP32_SHA256_H -#define __Hacl_HPKE_Curve64_CP32_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_64.h" -#include "Hacl_Chacha20Poly1305_32.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve64_CP32_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA512.c b/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA512.c deleted file mode 100644 index 9bdc373164..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA512.c +++ /dev/null @@ -1,1077 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be
included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_Curve64_CP32_SHA512.h" - - - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE; - uint32_t res1; - uint32_t res0; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(o_pkE1, skE); - res1 = (uint32_t)0U; - if (res1 == (uint32_t)0U) - { - uint8_t o_dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR); - { - uint8_t res2 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]); - res2 = uu____0 & res2; - } - } - { - uint8_t z = res2; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res20 = res; - uint8_t o_kemcontext[64U] = { 0U }; - if (res20 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U; - uint8_t *o_pkR = o_pkRm; - memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof 
(uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res0 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - } - } - } - } - else - { - res0 = (uint32_t)1U; - } - if (res0 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - 
o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = 
(uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res0; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res0; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[32U] = { 0U }; - uint32_t res1; - uint32_t ite; - Hacl_Curve25519_64_secret_to_public(pkR, skR); - res1 = (uint32_t)0U; - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc; - uint8_t dh[32U] = { 0U }; - uint8_t zeros[32U] = { 0U }; - Hacl_Curve25519_64_scalarmult(dh, skR, pkE); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]); - res0 = uu____0 & res0; - } - } - { - uint8_t z = res0; - uint32_t res; - if (z == (uint8_t)255U) - { - res = (uint32_t)1U; - } - else - { - res = (uint32_t)0U; - } - { - uint32_t res11 = res; - uint32_t res2; - uint8_t kemcontext[64U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)32U; - uint8_t *pkR1 = pkRm; - Hacl_Curve25519_64_secret_to_public(pkR1, skR); - { - uint32_t res20 = (uint32_t)0U; - if (res20 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____1 = suite_id_kem; - uu____1[0U] = (uint8_t)0x4bU; - uu____1[1U] = (uint8_t)0x45U; - uu____1[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____2 = suite_id_kem + (uint32_t)3U; - uu____2[0U] = (uint8_t)0U; - uu____2[1U] = (uint8_t)32U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp0; - uu____3[0U] = (uint8_t)0x48U; - uu____3[1U] = (uint8_t)0x50U; - uu____3[2U] = (uint8_t)0x4bU; - uu____3[3U] = (uint8_t)0x45U; - uu____3[4U] = (uint8_t)0x2dU; - uu____3[5U] = (uint8_t)0x76U; - uu____3[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)91U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * 
sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp; - store32_be(uu____4, (uint32_t)32U); - memcpy(uu____4, - uu____4 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____5 = tmp + (uint32_t)2U; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - uu____5[4U] = (uint8_t)0x2dU; - uu____5[5U] = (uint8_t)0x76U; - uu____5[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)64U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res2 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res2 = (uint32_t)1U; - } - } - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - uint8_t o_context[129U] = { 0U }; - uint8_t o_secret[64U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____6 = suite_id; - uu____6[0U] = (uint8_t)0x48U; - uu____6[1U] = (uint8_t)0x50U; - uu____6[2U] = (uint8_t)0x4bU; - uu____6[3U] = (uint8_t)0x45U; - { - uint8_t *uu____7 = suite_id + (uint32_t)4U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)32U; - { - uint8_t *uu____8 = suite_id + (uint32_t)6U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t *uu____9 = suite_id + (uint32_t)8U; - uu____9[0U] = (uint8_t)0U; - uu____9[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[64U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp0; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp0, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[64U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp1; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_info_hash, - empty, - (uint32_t)0U, - tmp1, - len1); - 
o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)64U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)65U, - o_info_hash, - (uint32_t)64U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp2; - uu____12[0U] = (uint8_t)0x48U; - uu____12[1U] = (uint8_t)0x50U; - uu____12[2U] = (uint8_t)0x4bU; - uu____12[3U] = (uint8_t)0x45U; - uu____12[4U] = (uint8_t)0x2dU; - uu____12[5U] = (uint8_t)0x76U; - uu____12[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_512(o_secret, - shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3; - store32_be(uu____13, (uint32_t)64U); - memcpy(uu____13, - uu____13 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp3 + (uint32_t)2U; - uu____14[0U] = (uint8_t)0x48U; - uu____14[1U] = (uint8_t)0x50U; - uu____14[2U] = (uint8_t)0x4bU; - uu____14[3U] = (uint8_t)0x45U; - uu____14[4U] = (uint8_t)0x2dU; - uu____14[5U] = (uint8_t)0x76U; - uu____14[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, - o_secret, - (uint32_t)64U, - tmp3, - len3, - (uint32_t)64U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)151U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4; - store32_be(uu____15, (uint32_t)32U); - memcpy(uu____15, - uu____15 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp4 + (uint32_t)2U; - uu____16[0U] = (uint8_t)0x48U; - uu____16[1U] = (uint8_t)0x50U; - uu____16[2U] = (uint8_t)0x4bU; - uu____16[3U] = (uint8_t)0x45U; - uu____16[4U] = (uint8_t)0x2dU; - uu____16[5U] = (uint8_t)0x76U; - uu____16[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, - o_secret, - (uint32_t)64U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)158U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp; - store32_be(uu____17, (uint32_t)12U); - 
memcpy(uu____17, - uu____17 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____18 = tmp + (uint32_t)2U; - uu____18[0U] = (uint8_t)0x48U; - uu____18[1U] = (uint8_t)0x50U; - uu____18[2U] = (uint8_t)0x4bU; - uu____18[3U] = (uint8_t)0x45U; - uu____18[4U] = (uint8_t)0x2dU; - uu____18[5U] = (uint8_t)0x76U; - uu____18[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)129U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, - o_secret, - (uint32_t)64U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[64U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - 
if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA512.h b/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA512.h deleted file mode 100644 index d269ec2154..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Curve64_CP32_SHA512.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HPKE_Curve64_CP32_SHA512_H -#define __Hacl_HPKE_Curve64_CP32_SHA512_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Curve25519_64.h" -#include "Hacl_Chacha20Poly1305_32.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_Curve64_CP32_SHA512_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Curve64_CP32_SHA512_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h b/dist/c89-compatible/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h deleted file mode 100644 index 4984ec8b55..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h +++ /dev/null @@ -1,54 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_H -#define __Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -typedef struct Hacl_Impl_HPKE_context_s_s -{ - uint8_t *ctx_key; - uint8_t *ctx_nonce; - uint64_t *ctx_seq; - uint8_t *ctx_exporter; -} -Hacl_Impl_HPKE_context_s; - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_P256_CP128_SHA256.c b/dist/c89-compatible/Hacl_HPKE_P256_CP128_SHA256.c deleted file mode 100644 index c90e6abefc..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_P256_CP128_SHA256.c +++ /dev/null @@ -1,1174 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "Hacl_HPKE_P256_CP128_SHA256.h" - -#include "internal/Hacl_P256.h" - -uint32_t -Hacl_HPKE_P256_CP128_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE + (uint32_t)1U; - uint64_t tempBuffer[100U] = { 0U }; - uint64_t resultBuffer[12U] = { 0U }; - uint64_t *resultBufferX = resultBuffer; - uint64_t *resultBufferY = resultBuffer + (uint32_t)4U; - uint8_t *resultX0 = o_pkE1; - uint8_t *resultY0 = o_pkE1 + (uint32_t)32U; - uint64_t flag; - bool res0; - uint32_t res1; - uint32_t res; - uint32_t ite; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer, skE, tempBuffer); - flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX, resultX0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY, resultY0); - res0 = flag == (uint64_t)0U; - if (res0) - { - res1 = (uint32_t)0U; - } - else - { - res1 = (uint32_t)1U; - } - if (res1 == (uint32_t)0U) - { - o_pkE[0U] = (uint8_t)4U; - { - uint8_t o_dh[64U] = { 0U }; - uint8_t tmp0[64U] = { 0U }; - uint64_t resultBufferFelem[12U] = { 0U }; - uint64_t *resultBufferFelemX = resultBufferFelem; - uint64_t *resultBufferFelemY = resultBufferFelem + (uint32_t)4U; - uint8_t *resultX = tmp0; - uint8_t *resultY = tmp0 + (uint32_t)32U; - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint8_t *pubKeyX = pkR; - uint8_t *pubKeyY = pkR + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - { - uint64_t flag0 = Hacl_Impl_P256_DH__ecp256dh_r(resultBufferFelem, publicKeyAsFelem, skE); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemY, resultY); - { - bool res3 = flag0 == (uint64_t)0U; - memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t)); - { - uint32_t res2; - if (res3) - { - res2 = (uint32_t)0U; - } - else - { - res2 = (uint32_t)1U; - } - { - uint8_t o_kemcontext[130U] = { 0U }; - if (res2 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U; - uint8_t *o_pkR = o_pkRm + (uint32_t)1U; - memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t)); - o_pkRm[0U] = (uint8_t)4U; - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____0 = suite_id_kem; - uu____0[0U] = (uint8_t)0x4bU; - uu____0[1U] = (uint8_t)0x45U; - uu____0[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____1 = suite_id_kem + (uint32_t)3U; - uu____1[0U] = (uint8_t)0U; - uu____1[1U] = (uint8_t)16U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____2 = tmp1; - uu____2[0U] = (uint8_t)0x48U; - uu____2[1U] = (uint8_t)0x50U; - uu____2[2U] = (uint8_t)0x4bU; - 
uu____2[3U] = (uint8_t)0x45U; - uu____2[4U] = (uint8_t)0x2dU; - uu____2[5U] = (uint8_t)0x76U; - uu____2[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)157U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp; - store32_be(uu____3, (uint32_t)32U); - memcpy(uu____3, - uu____3 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp + (uint32_t)2U; - uu____4[0U] = (uint8_t)0x48U; - uu____4[1U] = (uint8_t)0x50U; - uu____4[2U] = (uint8_t)0x4bU; - uu____4[3U] = (uint8_t)0x45U; - uu____4[4U] = (uint8_t)0x2dU; - uu____4[5U] = (uint8_t)0x76U; - uu____4[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)130U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res = (uint32_t)1U; - } - } - } - } - } - } - } - else - { - res = (uint32_t)1U; - } - if (res == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____5 = suite_id; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - { - uint8_t *uu____6 = suite_id + (uint32_t)4U; - uu____6[0U] = (uint8_t)0U; - uu____6[1U] = (uint8_t)16U; - { - uint8_t *uu____7 = suite_id + (uint32_t)6U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)1U; - { - uint8_t *uu____8 = suite_id + (uint32_t)8U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____9 = tmp0; - uu____9[0U] = (uint8_t)0x48U; - uu____9[1U] = (uint8_t)0x50U; - uu____9[2U] = (uint8_t)0x4bU; - uu____9[3U] = (uint8_t)0x45U; - uu____9[4U] = (uint8_t)0x2dU; - uu____9[5U] = (uint8_t)0x76U; - uu____9[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, 
(uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp1; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp2; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp3; - store32_be(uu____12, (uint32_t)32U); - memcpy(uu____12, - uu____12 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3 + (uint32_t)2U; - uu____13[0U] = (uint8_t)0x48U; - uu____13[1U] = (uint8_t)0x50U; - uu____13[2U] = (uint8_t)0x4bU; - uu____13[3U] = (uint8_t)0x45U; - uu____13[4U] = (uint8_t)0x2dU; - uu____13[5U] = (uint8_t)0x76U; - uu____13[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp4; - store32_be(uu____14, (uint32_t)32U); - memcpy(uu____14, - uu____14 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4 + (uint32_t)2U; - uu____15[0U] = (uint8_t)0x48U; - uu____15[1U] = (uint8_t)0x50U; - uu____15[2U] = (uint8_t)0x4bU; - uu____15[3U] = (uint8_t)0x45U; - uu____15[4U] = (uint8_t)0x2dU; - uu____15[5U] = (uint8_t)0x76U; - uu____15[6U] = 
(uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp; - store32_be(uu____16, (uint32_t)12U); - memcpy(uu____16, - uu____16 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp + (uint32_t)2U; - uu____17[0U] = (uint8_t)0x48U; - uu____17[1U] = (uint8_t)0x50U; - uu____17[2U] = (uint8_t)0x4bU; - uu____17[3U] = (uint8_t)0x45U; - uu____17[4U] = (uint8_t)0x2dU; - uu____17[5U] = (uint8_t)0x76U; - uu____17[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res; - } - return ite; -} - -uint32_t -Hacl_HPKE_P256_CP128_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[64U] = { 0U }; - uint64_t tempBuffer0[100U] = { 0U }; - uint64_t resultBuffer0[12U] = { 0U }; - uint64_t *resultBufferX0 = resultBuffer0; - uint64_t *resultBufferY0 = resultBuffer0 + (uint32_t)4U; - uint8_t *resultX0 = pkR; - uint8_t *resultY0 = pkR + (uint32_t)32U; - uint64_t flag0; - bool res; - uint32_t res1; - uint32_t ite; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer0, skR, tempBuffer0); - flag0 = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer0); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX0); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX0, resultX0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY0, resultY0); - res = flag0 == (uint64_t)0U; - if (res) - { - res1 = (uint32_t)0U; - } - else - { - res1 = (uint32_t)1U; - } - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc + (uint32_t)1U; - uint8_t dh[64U] = { 0U }; - uint8_t tmp0[64U] = { 0U }; - uint64_t resultBufferFelem[12U] = { 0U }; - uint64_t *resultBufferFelemX = resultBufferFelem; - uint64_t *resultBufferFelemY = resultBufferFelem + (uint32_t)4U; - uint8_t *resultX1 = tmp0; - uint8_t *resultY1 = tmp0 + (uint32_t)32U; - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint8_t *pubKeyX = pkE; - uint8_t *pubKeyY = pkE + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - { - uint64_t flag1 = Hacl_Impl_P256_DH__ecp256dh_r(resultBufferFelem, publicKeyAsFelem, skR); - 
Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemX, resultX1); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemY, resultY1); - { - bool res0 = flag1 == (uint64_t)0U; - memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t)); - { - uint32_t res11; - if (res0) - { - res11 = (uint32_t)0U; - } - else - { - res11 = (uint32_t)1U; - } - { - uint32_t res20; - uint8_t kemcontext[130U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)65U; - uint8_t *pkR1 = pkRm + (uint32_t)1U; - uint64_t tempBuffer[100U] = { 0U }; - uint64_t resultBuffer[12U] = { 0U }; - uint64_t *resultBufferX = resultBuffer; - uint64_t *resultBufferY = resultBuffer + (uint32_t)4U; - uint8_t *resultX = pkR1; - uint8_t *resultY = pkR1 + (uint32_t)32U; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer, skR, tempBuffer); - { - uint64_t flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY, resultY); - { - bool res3 = flag == (uint64_t)0U; - uint32_t res2; - if (res3) - { - res2 = (uint32_t)0U; - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t)); - pkRm[0U] = (uint8_t)4U; - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____0 = suite_id_kem; - uu____0[0U] = (uint8_t)0x4bU; - uu____0[1U] = (uint8_t)0x45U; - uu____0[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____1 = suite_id_kem + (uint32_t)3U; - uu____1[0U] = (uint8_t)0U; - uu____1[1U] = (uint8_t)16U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____2 = tmp1; - uu____2[0U] = (uint8_t)0x48U; - uu____2[1U] = (uint8_t)0x50U; - uu____2[2U] = (uint8_t)0x4bU; - uu____2[3U] = (uint8_t)0x45U; - uu____2[4U] = (uint8_t)0x2dU; - uu____2[5U] = (uint8_t)0x76U; - uu____2[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, - empty, - (uint32_t)0U, - tmp1, - len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)157U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp; - store32_be(uu____3, (uint32_t)32U); - memcpy(uu____3, - uu____3 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp + (uint32_t)2U; - uu____4[0U] = (uint8_t)0x48U; - uu____4[1U] = (uint8_t)0x50U; - uu____4[2U] = (uint8_t)0x4bU; - uu____4[3U] = (uint8_t)0x45U; - uu____4[4U] = (uint8_t)0x2dU; - uu____4[5U] = 
(uint8_t)0x76U; - uu____4[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)130U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res20 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res20 = (uint32_t)1U; - } - } - } - } - else - { - res20 = (uint32_t)1U; - } - if (res20 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____5 = suite_id; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - { - uint8_t *uu____6 = suite_id + (uint32_t)4U; - uu____6[0U] = (uint8_t)0U; - uu____6[1U] = (uint8_t)16U; - { - uint8_t *uu____7 = suite_id + (uint32_t)6U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)1U; - { - uint8_t *uu____8 = suite_id + (uint32_t)8U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____9 = tmp1; - uu____9[0U] = (uint8_t)0x48U; - uu____9[1U] = (uint8_t)0x50U; - uu____9[2U] = (uint8_t)0x4bU; - uu____9[3U] = (uint8_t)0x45U; - uu____9[4U] = (uint8_t)0x2dU; - uu____9[5U] = (uint8_t)0x76U; - uu____9[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp1, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp2[len1]; - memset(tmp2, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp2; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp2, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, - (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - 
KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp3[len2]; - memset(tmp3, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp3; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp3, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp4[len3]; - memset(tmp4, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp4; - store32_be(uu____12, (uint32_t)32U); - memcpy(uu____12, - uu____12 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp4 + (uint32_t)2U; - uu____13[0U] = (uint8_t)0x48U; - uu____13[1U] = (uint8_t)0x50U; - uu____13[2U] = (uint8_t)0x4bU; - uu____13[3U] = (uint8_t)0x45U; - uu____13[4U] = (uint8_t)0x2dU; - uu____13[5U] = (uint8_t)0x76U; - uu____13[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp4, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { - (uint8_t)0x6bU, - (uint8_t)0x65U, - (uint8_t)0x79U - }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp5[len4]; - memset(tmp5, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp5; - store32_be(uu____14, (uint32_t)32U); - memcpy(uu____14, - uu____14 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp5 + (uint32_t)2U; - uu____15[0U] = (uint8_t)0x48U; - uu____15[1U] = (uint8_t)0x50U; - uu____15[2U] = (uint8_t)0x4bU; - uu____15[3U] = (uint8_t)0x45U; - uu____15[4U] = (uint8_t)0x2dU; - uu____15[5U] = (uint8_t)0x76U; - uu____15[6U] = (uint8_t)0x31U; - memcpy(tmp5 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp5 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp5 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp5, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp; - store32_be(uu____16, (uint32_t)12U); - memcpy(uu____16, - uu____16 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____17 = tmp + (uint32_t)2U; - uu____17[0U] = (uint8_t)0x48U; - uu____17[1U] = (uint8_t)0x50U; - uu____17[2U] = (uint8_t)0x4bU; - uu____17[3U] = (uint8_t)0x45U; - uu____17[4U] = (uint8_t)0x2dU; - uu____17[5U] = (uint8_t)0x76U; - uu____17[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - 
suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_P256_CP128_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_P256_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_P256_CP128_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_P256_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - 
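(Aside, not part of the patch itself: the KaRaMeL-extracted bodies above are hard to follow, but every deleted setupBase* function builds the same RFC 9180 labeled-KDF input: a two-byte big-endian output length (for expand), the ASCII prefix "HPKE-v1", a suite_id, a short ASCII label such as "eae_prk", "shared_secret", "psk_id_hash", "info_hash", "secret", "exp", "key" or "base_nonce", then the context. The sealBase/openBase wrappers derive each AEAD nonce as base_nonce XOR the big-endian sequence counter. A minimal hand-written C sketch of those two building blocks follows; the helper names are illustrative rather than part of the HACL* API, and the HKDF call itself, Hacl_HKDF_expand_sha2_256/512 in the deleted code, is left to the caller.)

#include <stdint.h>
#include <string.h>

/* Build HKDF-Expand info = I2OSP(L, 2) || "HPKE-v1" || suite_id || label
 * || context, the exact layout the memcpy sequences above produce. The
 * generated code writes the length via store32_be followed by a 2-byte
 * memcpy; the net effect is the same two big-endian bytes. `out` must
 * hold at least 9 + suite_id_len + strlen(label) + context_len bytes;
 * the return value is the number of bytes written. */
static size_t
hpke_labeled_expand_info(uint8_t *out, uint16_t okm_len,
                         const uint8_t *suite_id, size_t suite_id_len,
                         const char *label,
                         const uint8_t *context, size_t context_len)
{
  size_t label_len = strlen(label);
  size_t n = 0U;
  out[n++] = (uint8_t)(okm_len >> 8U);   /* I2OSP(L, 2), big-endian */
  out[n++] = (uint8_t)(okm_len & 0xffU);
  memcpy(out + n, "HPKE-v1", 7U);           n += 7U;
  memcpy(out + n, suite_id, suite_id_len);  n += suite_id_len;
  memcpy(out + n, label, label_len);        n += label_len;
  memcpy(out + n, context, context_len);    n += context_len;
  return n;
}

/* Per-message nonce, as in sealBase/openBase: XOR the 12-byte base
 * nonce with the 64-bit sequence number stored big-endian in the last
 * 8 bytes (the first 4 bytes of the counter block stay zero). */
static void
hpke_compute_nonce(uint8_t nonce[12U], const uint8_t base_nonce[12U], uint64_t seq)
{
  uint8_t enc[12U] = { 0U };
  size_t i;
  for (i = 0U; i < 8U; i++)
    enc[4U + i] = (uint8_t)(seq >> (56U - 8U * i));  /* store64_be(enc + 4, seq) */
  for (i = 0U; i < 12U; i++)
    nonce[i] = (uint8_t)(base_nonce[i] ^ enc[i]);
}

(This layout also accounts for the fixed buffer lengths above: the P256 KEM's "shared_secret" step uses 2 + 7 + 5 + 13 + 130 = 157 bytes, with a 5-byte suite_id_kem and 130-byte kem_context, while the "exp"/"key" steps use 2 + 7 + 10 + 3 + 65 = 87 bytes, with a 10-byte suite_id and 65-byte key-schedule context.)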
diff --git a/dist/c89-compatible/Hacl_HPKE_P256_CP128_SHA256.h b/dist/c89-compatible/Hacl_HPKE_P256_CP128_SHA256.h deleted file mode 100644 index 33f07fcc5e..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_P256_CP128_SHA256.h +++ /dev/null @@ -1,94 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_P256_CP128_SHA256_H -#define __Hacl_HPKE_P256_CP128_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Chacha20Poly1305_128.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_P256_CP128_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_P256_CP128_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_P256_CP128_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_P256_CP128_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_P256_CP128_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_P256_CP256_SHA256.c b/dist/c89-compatible/Hacl_HPKE_P256_CP256_SHA256.c deleted file mode 100644 index 2b007d7358..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_P256_CP256_SHA256.c +++ /dev/null @@ -1,1174 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following 
conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_HPKE_P256_CP256_SHA256.h" - -#include "internal/Hacl_P256.h" - -uint32_t -Hacl_HPKE_P256_CP256_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE + (uint32_t)1U; - uint64_t tempBuffer[100U] = { 0U }; - uint64_t resultBuffer[12U] = { 0U }; - uint64_t *resultBufferX = resultBuffer; - uint64_t *resultBufferY = resultBuffer + (uint32_t)4U; - uint8_t *resultX0 = o_pkE1; - uint8_t *resultY0 = o_pkE1 + (uint32_t)32U; - uint64_t flag; - bool res0; - uint32_t res1; - uint32_t res; - uint32_t ite; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer, skE, tempBuffer); - flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX, resultX0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY, resultY0); - res0 = flag == (uint64_t)0U; - if (res0) - { - res1 = (uint32_t)0U; - } - else - { - res1 = (uint32_t)1U; - } - if (res1 == (uint32_t)0U) - { - o_pkE[0U] = (uint8_t)4U; - { - uint8_t o_dh[64U] = { 0U }; - uint8_t tmp0[64U] = { 0U }; - uint64_t resultBufferFelem[12U] = { 0U }; - uint64_t *resultBufferFelemX = resultBufferFelem; - uint64_t *resultBufferFelemY = resultBufferFelem + (uint32_t)4U; - uint8_t *resultX = tmp0; - uint8_t *resultY = tmp0 + (uint32_t)32U; - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint8_t *pubKeyX = pkR; - uint8_t *pubKeyY = pkR + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - { - uint64_t flag0 = Hacl_Impl_P256_DH__ecp256dh_r(resultBufferFelem, publicKeyAsFelem, skE); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemY, resultY); - { - bool res3 = flag0 == (uint64_t)0U; - memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t)); - { - uint32_t res2; - if (res3) - { - res2 = (uint32_t)0U; - } - else - { - res2 = (uint32_t)1U; - } - { - uint8_t o_kemcontext[130U] = { 0U }; - if (res2 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U; - uint8_t *o_pkR = o_pkRm + (uint32_t)1U; - memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t)); - o_pkRm[0U] = (uint8_t)4U; - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____0 = suite_id_kem; - uu____0[0U] = (uint8_t)0x4bU; - 
uu____0[1U] = (uint8_t)0x45U; - uu____0[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____1 = suite_id_kem + (uint32_t)3U; - uu____1[0U] = (uint8_t)0U; - uu____1[1U] = (uint8_t)16U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____2 = tmp1; - uu____2[0U] = (uint8_t)0x48U; - uu____2[1U] = (uint8_t)0x50U; - uu____2[2U] = (uint8_t)0x4bU; - uu____2[3U] = (uint8_t)0x45U; - uu____2[4U] = (uint8_t)0x2dU; - uu____2[5U] = (uint8_t)0x76U; - uu____2[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)157U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp; - store32_be(uu____3, (uint32_t)32U); - memcpy(uu____3, - uu____3 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp + (uint32_t)2U; - uu____4[0U] = (uint8_t)0x48U; - uu____4[1U] = (uint8_t)0x50U; - uu____4[2U] = (uint8_t)0x4bU; - uu____4[3U] = (uint8_t)0x45U; - uu____4[4U] = (uint8_t)0x2dU; - uu____4[5U] = (uint8_t)0x76U; - uu____4[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)130U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res = (uint32_t)1U; - } - } - } - } - } - } - } - else - { - res = (uint32_t)1U; - } - if (res == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____5 = suite_id; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - { - uint8_t *uu____6 = suite_id + (uint32_t)4U; - uu____6[0U] = (uint8_t)0U; - uu____6[1U] = (uint8_t)16U; - { - uint8_t *uu____7 = suite_id + (uint32_t)6U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)1U; - { - uint8_t *uu____8 = suite_id + (uint32_t)8U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____9 = tmp0; - uu____9[0U] = (uint8_t)0x48U; 
- uu____9[1U] = (uint8_t)0x50U; - uu____9[2U] = (uint8_t)0x4bU; - uu____9[3U] = (uint8_t)0x45U; - uu____9[4U] = (uint8_t)0x2dU; - uu____9[5U] = (uint8_t)0x76U; - uu____9[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp1; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp2; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp3; - store32_be(uu____12, (uint32_t)32U); - memcpy(uu____12, - uu____12 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3 + (uint32_t)2U; - uu____13[0U] = (uint8_t)0x48U; - uu____13[1U] = (uint8_t)0x50U; - uu____13[2U] = (uint8_t)0x4bU; - uu____13[3U] = (uint8_t)0x45U; - uu____13[4U] = (uint8_t)0x2dU; - uu____13[5U] = (uint8_t)0x76U; - uu____13[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - 
uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp4; - store32_be(uu____14, (uint32_t)32U); - memcpy(uu____14, - uu____14 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4 + (uint32_t)2U; - uu____15[0U] = (uint8_t)0x48U; - uu____15[1U] = (uint8_t)0x50U; - uu____15[2U] = (uint8_t)0x4bU; - uu____15[3U] = (uint8_t)0x45U; - uu____15[4U] = (uint8_t)0x2dU; - uu____15[5U] = (uint8_t)0x76U; - uu____15[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp; - store32_be(uu____16, (uint32_t)12U); - memcpy(uu____16, - uu____16 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp + (uint32_t)2U; - uu____17[0U] = (uint8_t)0x48U; - uu____17[1U] = (uint8_t)0x50U; - uu____17[2U] = (uint8_t)0x4bU; - uu____17[3U] = (uint8_t)0x45U; - uu____17[4U] = (uint8_t)0x2dU; - uu____17[5U] = (uint8_t)0x76U; - uu____17[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res; - } - return ite; -} - -uint32_t -Hacl_HPKE_P256_CP256_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[64U] = { 0U }; - uint64_t tempBuffer0[100U] = { 0U }; - uint64_t resultBuffer0[12U] = { 0U }; - uint64_t *resultBufferX0 = resultBuffer0; - uint64_t *resultBufferY0 = resultBuffer0 + (uint32_t)4U; - uint8_t *resultX0 = pkR; - uint8_t *resultY0 = pkR + (uint32_t)32U; - uint64_t flag0; - bool res; - uint32_t res1; - uint32_t ite; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer0, skR, tempBuffer0); - flag0 = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer0); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX0); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX0, resultX0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY0, resultY0); - res = flag0 == (uint64_t)0U; - if (res) - { - res1 = (uint32_t)0U; - } - else - { - res1 = (uint32_t)1U; - } - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc + (uint32_t)1U; - uint8_t dh[64U] = { 0U }; - uint8_t tmp0[64U] = { 0U }; - uint64_t resultBufferFelem[12U] = { 0U }; - uint64_t 
*resultBufferFelemX = resultBufferFelem; - uint64_t *resultBufferFelemY = resultBufferFelem + (uint32_t)4U; - uint8_t *resultX1 = tmp0; - uint8_t *resultY1 = tmp0 + (uint32_t)32U; - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint8_t *pubKeyX = pkE; - uint8_t *pubKeyY = pkE + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - { - uint64_t flag1 = Hacl_Impl_P256_DH__ecp256dh_r(resultBufferFelem, publicKeyAsFelem, skR); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemX, resultX1); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemY, resultY1); - { - bool res0 = flag1 == (uint64_t)0U; - memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t)); - { - uint32_t res11; - if (res0) - { - res11 = (uint32_t)0U; - } - else - { - res11 = (uint32_t)1U; - } - { - uint32_t res20; - uint8_t kemcontext[130U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)65U; - uint8_t *pkR1 = pkRm + (uint32_t)1U; - uint64_t tempBuffer[100U] = { 0U }; - uint64_t resultBuffer[12U] = { 0U }; - uint64_t *resultBufferX = resultBuffer; - uint64_t *resultBufferY = resultBuffer + (uint32_t)4U; - uint8_t *resultX = pkR1; - uint8_t *resultY = pkR1 + (uint32_t)32U; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer, skR, tempBuffer); - { - uint64_t flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY, resultY); - { - bool res3 = flag == (uint64_t)0U; - uint32_t res2; - if (res3) - { - res2 = (uint32_t)0U; - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t)); - pkRm[0U] = (uint8_t)4U; - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____0 = suite_id_kem; - uu____0[0U] = (uint8_t)0x4bU; - uu____0[1U] = (uint8_t)0x45U; - uu____0[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____1 = suite_id_kem + (uint32_t)3U; - uu____1[0U] = (uint8_t)0U; - uu____1[1U] = (uint8_t)16U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____2 = tmp1; - uu____2[0U] = (uint8_t)0x48U; - uu____2[1U] = (uint8_t)0x50U; - uu____2[2U] = (uint8_t)0x4bU; - uu____2[3U] = (uint8_t)0x45U; - uu____2[4U] = (uint8_t)0x2dU; - uu____2[5U] = (uint8_t)0x76U; - uu____2[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, - empty, - (uint32_t)0U, - tmp1, - len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, 
(uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)157U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp; - store32_be(uu____3, (uint32_t)32U); - memcpy(uu____3, - uu____3 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp + (uint32_t)2U; - uu____4[0U] = (uint8_t)0x48U; - uu____4[1U] = (uint8_t)0x50U; - uu____4[2U] = (uint8_t)0x4bU; - uu____4[3U] = (uint8_t)0x45U; - uu____4[4U] = (uint8_t)0x2dU; - uu____4[5U] = (uint8_t)0x76U; - uu____4[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)130U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res20 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res20 = (uint32_t)1U; - } - } - } - } - else - { - res20 = (uint32_t)1U; - } - if (res20 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____5 = suite_id; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - { - uint8_t *uu____6 = suite_id + (uint32_t)4U; - uu____6[0U] = (uint8_t)0U; - uu____6[1U] = (uint8_t)16U; - { - uint8_t *uu____7 = suite_id + (uint32_t)6U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)1U; - { - uint8_t *uu____8 = suite_id + (uint32_t)8U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____9 = tmp1; - uu____9[0U] = (uint8_t)0x48U; - uu____9[1U] = (uint8_t)0x50U; - uu____9[2U] = (uint8_t)0x4bU; - uu____9[3U] = (uint8_t)0x45U; - uu____9[4U] = (uint8_t)0x2dU; - uu____9[5U] = (uint8_t)0x76U; - uu____9[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp1, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp2[len1]; - memset(tmp2, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp2; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U 
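For the KEM step the same framing is reused with the 5-byte suite_id "KEM" || 0x0010 in place of the 10-byte one: label_eae_prk and label_shared_secret above are the ASCII strings "eae_prk" and "shared_secret", and kemcontext is enc || pkRm, two 65-byte uncompressed P-256 points (hence 130 bytes, and 2 + 7 + 5 + 13 + 130 = 157 for the expand input). Read as RFC 9180's DHKEM ExtractAndExpand, the block above computes the following, sketched here with an illustrative helper around the extract entry point used in the deleted code:

#include <stdint.h>
#include <string.h>
#include "Hacl_HKDF.h"   /* Hacl_HKDF_extract_sha2_256, as called above */

/* LabeledExtract(salt, label, ikm) =
   Extract(salt, "HPKE-v1" || suite_id || label || ikm) */
static void labeled_extract_sha256(
  uint8_t *prk /* 32 bytes out */,
  uint8_t *salt, uint32_t saltlen,
  uint8_t *suite_id, uint32_t suitelen,
  uint8_t *label, uint32_t labellen,
  uint8_t *ikm, uint32_t ikmlen,
  uint8_t *scratch /* at least 7 + suitelen + labellen + ikmlen bytes */)
{
  uint32_t len = 7U + suitelen + labellen + ikmlen;
  memcpy(scratch, "HPKE-v1", 7U);
  memcpy(scratch + 7U, suite_id, suitelen);
  memcpy(scratch + 7U + suitelen, label, labellen);
  memcpy(scratch + 7U + suitelen + labellen, ikm, ikmlen);
  Hacl_HKDF_extract_sha2_256(prk, salt, saltlen, scratch, len);
}

/* DHKEM ExtractAndExpand, as performed above:
   eae_prk       = LabeledExtract("", "eae_prk", dh)         (len 51)
   shared_secret = LabeledExpand(eae_prk, "shared_secret",
                                 kemcontext, 32)             (len 157) */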
* sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp2, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, - (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp3[len2]; - memset(tmp3, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp3; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp3, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp4[len3]; - memset(tmp4, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp4; - store32_be(uu____12, (uint32_t)32U); - memcpy(uu____12, - uu____12 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp4 + (uint32_t)2U; - uu____13[0U] = (uint8_t)0x48U; - uu____13[1U] = (uint8_t)0x50U; - uu____13[2U] = (uint8_t)0x4bU; - uu____13[3U] = (uint8_t)0x45U; - uu____13[4U] = (uint8_t)0x2dU; - uu____13[5U] = (uint8_t)0x76U; - uu____13[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp4, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { - (uint8_t)0x6bU, - (uint8_t)0x65U, - (uint8_t)0x79U - }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp5[len4]; - memset(tmp5, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp5; - store32_be(uu____14, (uint32_t)32U); - memcpy(uu____14, - uu____14 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp5 + (uint32_t)2U; - uu____15[0U] = (uint8_t)0x48U; - uu____15[1U] = (uint8_t)0x50U; - uu____15[2U] = (uint8_t)0x4bU; - uu____15[3U] = (uint8_t)0x45U; - uu____15[4U] = (uint8_t)0x2dU; - uu____15[5U] = (uint8_t)0x76U; - uu____15[6U] = (uint8_t)0x31U; - memcpy(tmp5 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp5 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp5 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp5, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - 
(uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp; - store32_be(uu____16, (uint32_t)12U); - memcpy(uu____16, - uu____16 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____17 = tmp + (uint32_t)2U; - uu____17[0U] = (uint8_t)0x48U; - uu____17[1U] = (uint8_t)0x50U; - uu____17[2U] = (uint8_t)0x4bU; - uu____17[3U] = (uint8_t)0x45U; - uu____17[4U] = (uint8_t)0x2dU; - uu____17[5U] = (uint8_t)0x76U; - uu____17[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_P256_CP256_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_P256_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_P256_CP256_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_P256_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - 
uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff --git a/dist/c89-compatible/Hacl_HPKE_P256_CP256_SHA256.h b/dist/c89-compatible/Hacl_HPKE_P256_CP256_SHA256.h deleted file mode 100644 index d40161c6cb..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_P256_CP256_SHA256.h +++ /dev/null @@ -1,94 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
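Both sealBase and openBase derive the AEAD nonce the same way: the 64-bit sequence number is serialized big-endian into the low 8 bytes of a 12-byte block and XORed into base_nonce, and the counter is only advanced (with a saturation check at 2^64 - 1) once the AEAD call has succeeded. A standalone sketch that folds those two steps into one helper for exposition:

#include <stdint.h>

/* nonce = base_nonce XOR (0x00^4 || I2OSP(seq, 8)); returns 1 once the
   sequence number would wrap (RFC 9180's MessageLimitReached). */
static uint32_t next_nonce(uint8_t nonce[12], const uint8_t base_nonce[12],
                           uint64_t *seq)
{
  uint8_t enc[12U] = { 0U };
  uint32_t i;
  uint64_t s = *seq;
  for (i = 0U; i < 8U; i++)
    enc[4U + i] = (uint8_t)(s >> (56U - 8U * i));
  for (i = 0U; i < 12U; i++)
    nonce[i] = enc[i] ^ base_nonce[i];
  if (s == (uint64_t)18446744073709551615U)
    return 1U;
  *seq = s + (uint64_t)1U;
  return 0U;
}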
- */ - - -#ifndef __Hacl_HPKE_P256_CP256_SHA256_H -#define __Hacl_HPKE_P256_CP256_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Chacha20Poly1305_256.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_P256_CP256_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_P256_CP256_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_P256_CP256_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_P256_CP256_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_P256_CP256_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_HPKE_P256_CP32_SHA256.c b/dist/c89-compatible/Hacl_HPKE_P256_CP32_SHA256.c deleted file mode 100644 index e49d88252c..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_P256_CP32_SHA256.c +++ /dev/null @@ -1,1174 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
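The four functions this header declared form the usual HPKE split: setupBaseS/setupBaseR expose the key schedule for streaming use, while sealBase/openBase are one-shot. A hypothetical round trip with the one-shot pair, assuming skE and skR are valid P-256 scalars and pkR is the recipient's raw 64-byte X || Y public key (key generation is out of scope here); the 65-byte enc is the serialized ephemeral public key and the ciphertext carries a 16-byte Poly1305 tag:

#include <stdint.h>
#include <string.h>
#include "Hacl_HPKE_P256_CP256_SHA256.h"

static int hpke_roundtrip(uint8_t *skE, uint8_t *pkR, uint8_t *skR)
{
  uint8_t info[4U] = { 1U, 2U, 3U, 4U };
  uint8_t aad[1U] = { 0U };          /* unused: aadlen is 0 */
  uint8_t pt[8U] = { 0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U };
  uint8_t enc[65U];                  /* 0x04 || X || Y */
  uint8_t ct[24U];                   /* 8-byte ciphertext || 16-byte tag */
  uint8_t rt[8U];
  uint32_t rc;
  rc = Hacl_HPKE_P256_CP256_SHA256_sealBase(skE, pkR, 4U, info,
                                            0U, aad, 8U, pt, enc, ct);
  if (rc != 0U) return 1;
  rc = Hacl_HPKE_P256_CP256_SHA256_openBase(enc, skR, 4U, info,
                                            0U, aad, 24U, ct, rt);
  if (rc != 0U) return 1;
  return memcmp(pt, rt, 8U) != 0;
}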
- */ - - -#include "Hacl_HPKE_P256_CP32_SHA256.h" - -#include "internal/Hacl_P256.h" - -uint32_t -Hacl_HPKE_P256_CP32_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t o_shared[32U] = { 0U }; - uint8_t *o_pkE1 = o_pkE + (uint32_t)1U; - uint64_t tempBuffer[100U] = { 0U }; - uint64_t resultBuffer[12U] = { 0U }; - uint64_t *resultBufferX = resultBuffer; - uint64_t *resultBufferY = resultBuffer + (uint32_t)4U; - uint8_t *resultX0 = o_pkE1; - uint8_t *resultY0 = o_pkE1 + (uint32_t)32U; - uint64_t flag; - bool res0; - uint32_t res1; - uint32_t res; - uint32_t ite; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer, skE, tempBuffer); - flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX, resultX0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY, resultY0); - res0 = flag == (uint64_t)0U; - if (res0) - { - res1 = (uint32_t)0U; - } - else - { - res1 = (uint32_t)1U; - } - if (res1 == (uint32_t)0U) - { - o_pkE[0U] = (uint8_t)4U; - { - uint8_t o_dh[64U] = { 0U }; - uint8_t tmp0[64U] = { 0U }; - uint64_t resultBufferFelem[12U] = { 0U }; - uint64_t *resultBufferFelemX = resultBufferFelem; - uint64_t *resultBufferFelemY = resultBufferFelem + (uint32_t)4U; - uint8_t *resultX = tmp0; - uint8_t *resultY = tmp0 + (uint32_t)32U; - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint8_t *pubKeyX = pkR; - uint8_t *pubKeyY = pkR + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - { - uint64_t flag0 = Hacl_Impl_P256_DH__ecp256dh_r(resultBufferFelem, publicKeyAsFelem, skE); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemY, resultY); - { - bool res3 = flag0 == (uint64_t)0U; - memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t)); - { - uint32_t res2; - if (res3) - { - res2 = (uint32_t)0U; - } - else - { - res2 = (uint32_t)1U; - } - { - uint8_t o_kemcontext[130U] = { 0U }; - if (res2 == (uint32_t)0U) - { - memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t)); - { - uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U; - uint8_t *o_pkR = o_pkRm + (uint32_t)1U; - memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t)); - o_pkRm[0U] = (uint8_t)4U; - { - uint8_t *o_dhm = o_dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____0 = suite_id_kem; - uu____0[0U] = (uint8_t)0x4bU; - uu____0[1U] = (uint8_t)0x45U; - uu____0[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____1 = suite_id_kem + (uint32_t)3U; - uu____1[0U] = (uint8_t)0U; - uu____1[1U] = (uint8_t)16U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____2 = tmp1; - uu____2[0U] = (uint8_t)0x48U; - uu____2[1U] = (uint8_t)0x50U; - uu____2[2U] = (uint8_t)0x4bU; - 
uu____2[3U] = (uint8_t)0x45U; - uu____2[4U] = (uint8_t)0x2dU; - uu____2[5U] = (uint8_t)0x76U; - uu____2[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)157U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp; - store32_be(uu____3, (uint32_t)32U); - memcpy(uu____3, - uu____3 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp + (uint32_t)2U; - uu____4[0U] = (uint8_t)0x48U; - uu____4[1U] = (uint8_t)0x50U; - uu____4[2U] = (uint8_t)0x4bU; - uu____4[3U] = (uint8_t)0x45U; - uu____4[4U] = (uint8_t)0x2dU; - uu____4[5U] = (uint8_t)0x76U; - uu____4[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - o_kemcontext, - (uint32_t)130U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - else - { - res = (uint32_t)1U; - } - } - } - } - } - } - } - else - { - res = (uint32_t)1U; - } - if (res == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____5 = suite_id; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - { - uint8_t *uu____6 = suite_id + (uint32_t)4U; - uu____6[0U] = (uint8_t)0U; - uu____6[1U] = (uint8_t)16U; - { - uint8_t *uu____7 = suite_id + (uint32_t)6U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)1U; - { - uint8_t *uu____8 = suite_id + (uint32_t)8U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U, - (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp0[len0]; - memset(tmp0, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____9 = tmp0; - uu____9[0U] = (uint8_t)0x48U; - uu____9[1U] = (uint8_t)0x50U; - uu____9[2U] = (uint8_t)0x4bU; - uu____9[3U] = (uint8_t)0x45U; - uu____9[4U] = (uint8_t)0x2dU; - uu____9[5U] = (uint8_t)0x76U; - uu____9[6U] = (uint8_t)0x31U; - memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU, - (uint8_t)0x68U, 
(uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp1[len1]; - memset(tmp1, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp1; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp2[len2]; - memset(tmp2, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp2; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - o_shared, - (uint32_t)32U, - tmp2, - len2); - { - uint8_t - label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp3[len3]; - memset(tmp3, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp3; - store32_be(uu____12, (uint32_t)32U); - memcpy(uu____12, - uu____12 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp3 + (uint32_t)2U; - uu____13[0U] = (uint8_t)0x48U; - uu____13[1U] = (uint8_t)0x50U; - uu____13[2U] = (uint8_t)0x4bU; - uu____13[3U] = (uint8_t)0x45U; - uu____13[4U] = (uint8_t)0x2dU; - uu____13[5U] = (uint8_t)0x76U; - uu____13[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp3, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp4[len4]; - memset(tmp4, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp4; - store32_be(uu____14, (uint32_t)32U); - memcpy(uu____14, - uu____14 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp4 + (uint32_t)2U; - uu____15[0U] = (uint8_t)0x48U; - uu____15[1U] = (uint8_t)0x50U; - uu____15[2U] = (uint8_t)0x4bU; - uu____15[3U] = (uint8_t)0x45U; - uu____15[4U] = (uint8_t)0x2dU; - uu____15[5U] = (uint8_t)0x76U; - uu____15[6U] = 
(uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp4, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, - (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp; - store32_be(uu____16, (uint32_t)12U); - memcpy(uu____16, - uu____16 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____17 = tmp + (uint32_t)2U; - uu____17[0U] = (uint8_t)0x48U; - uu____17[1U] = (uint8_t)0x50U; - uu____17[2U] = (uint8_t)0x4bU; - uu____17[3U] = (uint8_t)0x45U; - uu____17[4U] = (uint8_t)0x2dU; - uu____17[5U] = (uint8_t)0x76U; - uu____17[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = res; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = res; - } - return ite; -} - -uint32_t -Hacl_HPKE_P256_CP32_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -) -{ - uint8_t pkR[64U] = { 0U }; - uint64_t tempBuffer0[100U] = { 0U }; - uint64_t resultBuffer0[12U] = { 0U }; - uint64_t *resultBufferX0 = resultBuffer0; - uint64_t *resultBufferY0 = resultBuffer0 + (uint32_t)4U; - uint8_t *resultX0 = pkR; - uint8_t *resultY0 = pkR + (uint32_t)32U; - uint64_t flag0; - bool res; - uint32_t res1; - uint32_t ite; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer0, skR, tempBuffer0); - flag0 = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer0); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX0); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX0, resultX0); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY0, resultY0); - res = flag0 == (uint64_t)0U; - if (res) - { - res1 = (uint32_t)0U; - } - else - { - res1 = (uint32_t)1U; - } - { - uint8_t shared[32U] = { 0U }; - if (res1 == (uint32_t)0U) - { - uint8_t *pkE = enc + (uint32_t)1U; - uint8_t dh[64U] = { 0U }; - uint8_t tmp0[64U] = { 0U }; - uint64_t resultBufferFelem[12U] = { 0U }; - uint64_t *resultBufferFelemX = resultBufferFelem; - uint64_t *resultBufferFelemY = resultBufferFelem + (uint32_t)4U; - uint8_t *resultX1 = tmp0; - uint8_t *resultY1 = tmp0 + (uint32_t)32U; - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint8_t *pubKeyX = pkE; - uint8_t *pubKeyY = pkE + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - { - uint64_t flag1 = Hacl_Impl_P256_DH__ecp256dh_r(resultBufferFelem, publicKeyAsFelem, skR); - 
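The endianness shuffling around each ECDH call above is a representation change, not arithmetic: the wire format for P-256 coordinates is 32-byte big-endian strings, while the field code works on four 64-bit limbs stored least-significant limb first, so every input goes through toUint64ChangeEndian and every output back through changeEndian plus toUint8. A sketch of the input direction under that reading (function name illustrative):

#include <stdint.h>

/* 32-byte big-endian coordinate -> four 64-bit limbs, least significant
   limb first (the combined effect of toUint64ChangeEndian above). */
static void bytes_be_to_felem(const uint8_t in[32], uint64_t out[4])
{
  uint32_t i, j;
  for (i = 0U; i < 4U; i++)
  {
    uint64_t w = (uint64_t)0U;
    for (j = 0U; j < 8U; j++)
      w = (w << 8U) | (uint64_t)in[8U * i + j];
    out[3U - i] = w; /* byte 0 is most significant, limb 0 least */
  }
}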
Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemX, resultX1); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemY, resultY1); - { - bool res0 = flag1 == (uint64_t)0U; - memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t)); - { - uint32_t res11; - if (res0) - { - res11 = (uint32_t)0U; - } - else - { - res11 = (uint32_t)1U; - } - { - uint32_t res20; - uint8_t kemcontext[130U] = { 0U }; - if (res11 == (uint32_t)0U) - { - uint8_t *pkRm = kemcontext + (uint32_t)65U; - uint8_t *pkR1 = pkRm + (uint32_t)1U; - uint64_t tempBuffer[100U] = { 0U }; - uint64_t resultBuffer[12U] = { 0U }; - uint64_t *resultBufferX = resultBuffer; - uint64_t *resultBufferY = resultBuffer + (uint32_t)4U; - uint8_t *resultX = pkR1; - uint8_t *resultY = pkR1 + (uint32_t)32U; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer, skR, tempBuffer); - { - uint64_t flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY, resultY); - { - bool res3 = flag == (uint64_t)0U; - uint32_t res2; - if (res3) - { - res2 = (uint32_t)0U; - } - else - { - res2 = (uint32_t)1U; - } - if (res2 == (uint32_t)0U) - { - memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t)); - pkRm[0U] = (uint8_t)4U; - { - uint8_t *dhm = dh; - uint8_t o_eae_prk[32U] = { 0U }; - uint8_t suite_id_kem[5U] = { 0U }; - uint8_t *uu____0 = suite_id_kem; - uu____0[0U] = (uint8_t)0x4bU; - uu____0[1U] = (uint8_t)0x45U; - uu____0[2U] = (uint8_t)0x4dU; - { - uint8_t *uu____1 = suite_id_kem + (uint32_t)3U; - uu____1[0U] = (uint8_t)0U; - uu____1[1U] = (uint8_t)16U; - { - uint8_t *empty = suite_id_kem; - uint8_t - label_eae_prk[7U] = - { - (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, - (uint8_t)0x70U, (uint8_t)0x72U, (uint8_t)0x6bU - }; - uint32_t len0 = (uint32_t)51U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____2 = tmp1; - uu____2[0U] = (uint8_t)0x48U; - uu____2[1U] = (uint8_t)0x50U; - uu____2[2U] = (uint8_t)0x4bU; - uu____2[3U] = (uint8_t)0x45U; - uu____2[4U] = (uint8_t)0x2dU; - uu____2[5U] = (uint8_t)0x76U; - uu____2[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)12U, - label_eae_prk, - (uint32_t)7U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_eae_prk, - empty, - (uint32_t)0U, - tmp1, - len0); - { - uint8_t - label_shared_secret[13U] = - { - (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, - (uint8_t)0x65U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, - (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U, - (uint8_t)0x74U - }; - uint32_t len = (uint32_t)157U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____3 = tmp; - store32_be(uu____3, (uint32_t)32U); - memcpy(uu____3, - uu____3 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____4 = tmp + (uint32_t)2U; - uu____4[0U] = (uint8_t)0x48U; - uu____4[1U] = (uint8_t)0x50U; - uu____4[2U] = (uint8_t)0x4bU; - uu____4[3U] = (uint8_t)0x45U; - uu____4[4U] = (uint8_t)0x2dU; - uu____4[5U] = 
(uint8_t)0x76U; - uu____4[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - suite_id_kem, - (uint32_t)5U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)14U, - label_shared_secret, - (uint32_t)13U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)27U, - kemcontext, - (uint32_t)130U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(shared, - o_eae_prk, - (uint32_t)32U, - tmp, - len, - (uint32_t)32U); - res20 = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - else - { - res20 = (uint32_t)1U; - } - } - } - } - else - { - res20 = (uint32_t)1U; - } - if (res20 == (uint32_t)0U) - { - uint8_t o_context[65U] = { 0U }; - uint8_t o_secret[32U] = { 0U }; - uint8_t suite_id[10U] = { 0U }; - uint8_t *uu____5 = suite_id; - uu____5[0U] = (uint8_t)0x48U; - uu____5[1U] = (uint8_t)0x50U; - uu____5[2U] = (uint8_t)0x4bU; - uu____5[3U] = (uint8_t)0x45U; - { - uint8_t *uu____6 = suite_id + (uint32_t)4U; - uu____6[0U] = (uint8_t)0U; - uu____6[1U] = (uint8_t)16U; - { - uint8_t *uu____7 = suite_id + (uint32_t)6U; - uu____7[0U] = (uint8_t)0U; - uu____7[1U] = (uint8_t)1U; - { - uint8_t *uu____8 = suite_id + (uint32_t)8U; - uu____8[0U] = (uint8_t)0U; - uu____8[1U] = (uint8_t)3U; - { - uint8_t - label_psk_id_hash[11U] = - { - (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, - (uint8_t)0x69U, (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, - (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U - }; - uint8_t o_psk_id_hash[32U] = { 0U }; - uint8_t *empty = suite_id; - uint32_t len0 = (uint32_t)28U; - KRML_CHECK_SIZE(sizeof (uint8_t), len0); - { - uint8_t tmp1[len0]; - memset(tmp1, 0U, len0 * sizeof (uint8_t)); - { - uint8_t *uu____9 = tmp1; - uu____9[0U] = (uint8_t)0x48U; - uu____9[1U] = (uint8_t)0x50U; - uu____9[2U] = (uint8_t)0x4bU; - uu____9[3U] = (uint8_t)0x45U; - uu____9[4U] = (uint8_t)0x2dU; - uu____9[5U] = (uint8_t)0x76U; - uu____9[6U] = (uint8_t)0x31U; - memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)17U, - label_psk_id_hash, - (uint32_t)11U * sizeof (uint8_t)); - memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_psk_id_hash, - empty, - (uint32_t)0U, - tmp1, - len0); - { - uint8_t - label_info_hash[9U] = - { - (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, - (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, - (uint8_t)0x68U - }; - uint8_t o_info_hash[32U] = { 0U }; - uint32_t len1 = (uint32_t)26U + infolen; - KRML_CHECK_SIZE(sizeof (uint8_t), len1); - { - uint8_t tmp2[len1]; - memset(tmp2, 0U, len1 * sizeof (uint8_t)); - { - uint8_t *uu____10 = tmp2; - uu____10[0U] = (uint8_t)0x48U; - uu____10[1U] = (uint8_t)0x50U; - uu____10[2U] = (uint8_t)0x4bU; - uu____10[3U] = (uint8_t)0x45U; - uu____10[4U] = (uint8_t)0x2dU; - uu____10[5U] = (uint8_t)0x76U; - uu____10[6U] = (uint8_t)0x31U; - memcpy(tmp2 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)17U, - label_info_hash, - (uint32_t)9U * sizeof (uint8_t)); - memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_info_hash, - empty, - (uint32_t)0U, - tmp2, - len1); - o_context[0U] = (uint8_t)0U; - memcpy(o_context + (uint32_t)1U, - o_psk_id_hash, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(o_context + (uint32_t)33U, - o_info_hash, - (uint32_t)32U * sizeof (uint8_t)); - { - uint8_t - label_secret[6U] = - { - (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, - (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U - }; - uint32_t len2 = (uint32_t)23U; - 
KRML_CHECK_SIZE(sizeof (uint8_t), len2); - { - uint8_t tmp3[len2]; - memset(tmp3, 0U, len2 * sizeof (uint8_t)); - { - uint8_t *uu____11 = tmp3; - uu____11[0U] = (uint8_t)0x48U; - uu____11[1U] = (uint8_t)0x50U; - uu____11[2U] = (uint8_t)0x4bU; - uu____11[3U] = (uint8_t)0x45U; - uu____11[4U] = (uint8_t)0x2dU; - uu____11[5U] = (uint8_t)0x76U; - uu____11[6U] = (uint8_t)0x31U; - memcpy(tmp3 + (uint32_t)7U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)17U, - label_secret, - (uint32_t)6U * sizeof (uint8_t)); - memcpy(tmp3 + (uint32_t)23U, - empty, - (uint32_t)0U * sizeof (uint8_t)); - Hacl_HKDF_extract_sha2_256(o_secret, - shared, - (uint32_t)32U, - tmp3, - len2); - { - uint8_t - label_exp[3U] = - { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U }; - uint32_t len3 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len3); - { - uint8_t tmp4[len3]; - memset(tmp4, 0U, len3 * sizeof (uint8_t)); - { - uint8_t *uu____12 = tmp4; - store32_be(uu____12, (uint32_t)32U); - memcpy(uu____12, - uu____12 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____13 = tmp4 + (uint32_t)2U; - uu____13[0U] = (uint8_t)0x48U; - uu____13[1U] = (uint8_t)0x50U; - uu____13[2U] = (uint8_t)0x4bU; - uu____13[3U] = (uint8_t)0x45U; - uu____13[4U] = (uint8_t)0x2dU; - uu____13[5U] = (uint8_t)0x76U; - uu____13[6U] = (uint8_t)0x31U; - memcpy(tmp4 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)19U, - label_exp, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp4 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, - o_secret, - (uint32_t)32U, - tmp4, - len3, - (uint32_t)32U); - { - uint8_t - label_key[3U] = - { - (uint8_t)0x6bU, - (uint8_t)0x65U, - (uint8_t)0x79U - }; - uint32_t len4 = (uint32_t)87U; - KRML_CHECK_SIZE(sizeof (uint8_t), len4); - { - uint8_t tmp5[len4]; - memset(tmp5, 0U, len4 * sizeof (uint8_t)); - { - uint8_t *uu____14 = tmp5; - store32_be(uu____14, (uint32_t)32U); - memcpy(uu____14, - uu____14 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t *uu____15 = tmp5 + (uint32_t)2U; - uu____15[0U] = (uint8_t)0x48U; - uu____15[1U] = (uint8_t)0x50U; - uu____15[2U] = (uint8_t)0x4bU; - uu____15[3U] = (uint8_t)0x45U; - uu____15[4U] = (uint8_t)0x2dU; - uu____15[5U] = (uint8_t)0x76U; - uu____15[6U] = (uint8_t)0x31U; - memcpy(tmp5 + (uint32_t)9U, - suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp5 + (uint32_t)19U, - label_key, - (uint32_t)3U * sizeof (uint8_t)); - memcpy(tmp5 + (uint32_t)22U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, - o_secret, - (uint32_t)32U, - tmp5, - len4, - (uint32_t)32U); - { - uint8_t - label_base_nonce[10U] = - { - (uint8_t)0x62U, (uint8_t)0x61U, - (uint8_t)0x73U, (uint8_t)0x65U, - (uint8_t)0x5fU, (uint8_t)0x6eU, - (uint8_t)0x6fU, (uint8_t)0x6eU, - (uint8_t)0x63U, (uint8_t)0x65U - }; - uint32_t len = (uint32_t)94U; - KRML_CHECK_SIZE(sizeof (uint8_t), len); - { - uint8_t tmp[len]; - memset(tmp, 0U, len * sizeof (uint8_t)); - { - uint8_t *uu____16 = tmp; - store32_be(uu____16, (uint32_t)12U); - memcpy(uu____16, - uu____16 + (uint32_t)2U, - (uint32_t)2U * sizeof (uint8_t)); - { - uint8_t - *uu____17 = tmp + (uint32_t)2U; - uu____17[0U] = (uint8_t)0x48U; - uu____17[1U] = (uint8_t)0x50U; - uu____17[2U] = (uint8_t)0x4bU; - uu____17[3U] = (uint8_t)0x45U; - uu____17[4U] = (uint8_t)0x2dU; - uu____17[5U] = (uint8_t)0x76U; - uu____17[6U] = (uint8_t)0x31U; - memcpy(tmp + (uint32_t)9U, - 
suite_id, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)19U, - label_base_nonce, - (uint32_t)10U * sizeof (uint8_t)); - memcpy(tmp + (uint32_t)29U, - o_context, - (uint32_t)65U * sizeof (uint8_t)); - Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, - o_secret, - (uint32_t)32U, - tmp, - len, - (uint32_t)12U); - o_ctx.ctx_seq[0U] = (uint64_t)0U; - ite = (uint32_t)0U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - } - } - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; - } -} - -uint32_t -Hacl_HPKE_P256_CP32_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_P256_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - plainlen, - plain, - o_ct, - o_ct + plainlen); - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res1; - if (s1 == (uint64_t)18446744073709551615U) - { - res1 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res1 = (uint32_t)0U; - } - { - uint32_t res10 = res1; - ite = res10; - } - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - -uint32_t -Hacl_HPKE_P256_CP32_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -) -{ - uint8_t ctx_key[32U] = { 0U }; - uint8_t ctx_nonce[12U] = { 0U }; - uint64_t ctx_seq = (uint64_t)0U; - uint8_t ctx_exporter[32U] = { 0U }; - Hacl_Impl_HPKE_context_s o_ctx; - uint32_t res; - uint32_t ite; - o_ctx.ctx_key = ctx_key; - o_ctx.ctx_nonce = ctx_nonce; - o_ctx.ctx_seq = &ctx_seq; - o_ctx.ctx_exporter = ctx_exporter; - res = Hacl_HPKE_P256_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info); - if (res == (uint32_t)0U) - { - uint8_t nonce[12U] = { 0U }; - uint64_t s = o_ctx.ctx_seq[0U]; - uint8_t enc[12U] = { 0U }; - store64_be(enc + (uint32_t)4U, s); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint8_t xi = enc[i]; - uint8_t yi = o_ctx.ctx_nonce[i]; - nonce[i] = xi ^ yi;); - { - uint32_t - res10 = - Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key, - nonce, - aadlen, - aad, - ctlen - (uint32_t)16U, - o_pt, - ct, - ct + ctlen - (uint32_t)16U); - uint32_t res1; - if (res10 == (uint32_t)0U) - { - uint64_t s1 = o_ctx.ctx_seq[0U]; - uint32_t res2; - if (s1 == (uint64_t)18446744073709551615U) - { - res2 = (uint32_t)1U; - } - else - { - uint64_t s_ = s1 + (uint64_t)1U; - o_ctx.ctx_seq[0U] = s_; - res2 = (uint32_t)0U; - } - res1 = res2; - } - else - { - res1 = (uint32_t)1U; - } - ite = res1; - } - } - else - { - ite = (uint32_t)1U; - } - return ite; -} - diff 
--git a/dist/c89-compatible/Hacl_HPKE_P256_CP32_SHA256.h b/dist/c89-compatible/Hacl_HPKE_P256_CP32_SHA256.h deleted file mode 100644 index 3b86e09735..0000000000 --- a/dist/c89-compatible/Hacl_HPKE_P256_CP32_SHA256.h +++ /dev/null @@ -1,94 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_HPKE_P256_CP32_SHA256_H -#define __Hacl_HPKE_P256_CP32_SHA256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h" -#include "Hacl_HKDF.h" -#include "Hacl_Chacha20Poly1305_32.h" -#include "evercrypt_targetconfig.h" -uint32_t -Hacl_HPKE_P256_CP32_SHA256_setupBaseS( - uint8_t *o_pkE, - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_P256_CP32_SHA256_setupBaseR( - Hacl_Impl_HPKE_context_s o_ctx, - uint8_t *enc, - uint8_t *skR, - uint32_t infolen, - uint8_t *info -); - -uint32_t -Hacl_HPKE_P256_CP32_SHA256_sealBase( - uint8_t *skE, - uint8_t *pkR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t plainlen, - uint8_t *plain, - uint8_t *o_enc, - uint8_t *o_ct -); - -uint32_t -Hacl_HPKE_P256_CP32_SHA256_openBase( - uint8_t *pkE, - uint8_t *skR, - uint32_t infolen, - uint8_t *info, - uint32_t aadlen, - uint8_t *aad, - uint32_t ctlen, - uint8_t *ct, - uint8_t *o_pt -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_HPKE_P256_CP32_SHA256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Hash_Base.c b/dist/c89-compatible/Hacl_Hash_Base.c deleted file mode 100644 index 7946dfd9b0..0000000000 --- a/dist/c89-compatible/Hacl_Hash_Base.c +++ /dev/null @@ -1,220 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this 
permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Hash_Base.h" - - - -uint32_t Hacl_Hash_Definitions_word_len(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint32_t)4U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)4U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint32_t)4U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)4U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)8U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)8U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint32_t)8U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint32_t)4U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint32_t)8U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -uint32_t Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)128U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)128U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint32_t)136U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint32_t)128U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -uint32_t Hacl_Hash_Definitions_hash_word_len(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint32_t)4U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)5U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint32_t)7U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)8U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)6U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)8U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint32_t)4U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint32_t)8U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint32_t)8U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -uint32_t Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint32_t)16U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)20U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint32_t)28U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)32U; - 
} - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)48U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint32_t)64U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - diff --git a/dist/c89-compatible/Hacl_Hash_Base.h b/dist/c89-compatible/Hacl_Hash_Base.h deleted file mode 100644 index 3df2d628bc..0000000000 --- a/dist/c89-compatible/Hacl_Hash_Base.h +++ /dev/null @@ -1,55 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Hash_Base_H -#define __Hacl_Hash_Base_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Spec.h" -#include "evercrypt_targetconfig.h" -uint32_t Hacl_Hash_Definitions_word_len(Spec_Hash_Definitions_hash_alg a); - -uint32_t Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_hash_alg a); - -uint32_t Hacl_Hash_Definitions_hash_word_len(Spec_Hash_Definitions_hash_alg a); - -uint32_t Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_hash_alg a); - -typedef uint8_t *Hacl_Hash_Definitions_hash_t; - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Hash_Base_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Hash_Blake2.c b/dist/c89-compatible/Hacl_Hash_Blake2.c deleted file mode 100644 index 5876948592..0000000000 --- a/dist/c89-compatible/Hacl_Hash_Blake2.c +++ /dev/null @@ -1,3468 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
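These accessors encode the standard parameter table for the supported algorithms (digest, word and block sizes in bytes); note that the invariant hash_len(a) == word_len(a) * hash_word_len(a) holds for every listed case. Typical use is sizing buffers for an algorithm chosen at run time, e.g. this small sketch:

#include <stdint.h>
#include <stdlib.h>
#include "Hacl_Hash_Base.h"

/* Allocate a digest buffer sized for the requested algorithm; returns
   NULL on allocation failure. */
static uint8_t *alloc_digest(Spec_Hash_Definitions_hash_alg a)
{
  return (uint8_t *)malloc(Hacl_Hash_Definitions_hash_len(a));
}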
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "internal/Hacl_Hash_Blake2.h" - - - -uint64_t Hacl_Hash_Core_Blake2_init_blake2s_32(uint32_t *s) -{ - uint32_t *r0 = s; - uint32_t *r1 = s + (uint32_t)4U; - uint32_t *r2 = s + (uint32_t)8U; - uint32_t *r3 = s + (uint32_t)12U; - uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U]; - uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U]; - uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U]; - uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U]; - uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U]; - uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U]; - uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U]; - uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U]; - uint32_t kk_shift_8; - uint32_t iv0_; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = (uint32_t)0U; - iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ (uint32_t)32U)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; - return (uint64_t)0U; -} - -uint64_t Hacl_Hash_Core_Blake2_update_blake2s_32(uint32_t *s, uint64_t totlen, uint8_t *block) -{ - uint32_t wv[16U] = { 0U }; - uint64_t totlen1 = totlen + (uint64_t)(uint32_t)64U; - uint32_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = m_w; - uint8_t *bj = block + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - uint32_t mask[4U] = { 0U }; - uint32_t wv_14 = (uint32_t)0U; - uint32_t wv_15 = (uint32_t)0U; - uint32_t *wv3; - uint32_t *s00; - uint32_t *s16; - uint32_t *r00; - uint32_t *r10; - uint32_t *r20; - uint32_t *r30; - mask[0U] = (uint32_t)totlen1; - mask[1U] = (uint32_t)(totlen1 >> (uint32_t)32U); - mask[2U] = wv_14; - mask[3U] = wv_15; - memcpy(wv, s, (uint32_t)16U * sizeof (uint32_t)); - wv3 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv3; - uint32_t x = wv3[i] ^ mask[i]; - os[i] = x;); - KRML_MAYBE_FOR10(i0, - (uint32_t)0U, - (uint32_t)10U, - (uint32_t)1U, - uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U; - uint32_t m_st[16U] = { 0U }; - uint32_t *r0 = m_st; - uint32_t *r1 = m_st + (uint32_t)4U; - uint32_t *r21 = m_st + (uint32_t)8U; - uint32_t *r31 = m_st + (uint32_t)12U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = 
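init_blake2s_32 above specializes the RFC 7693 parameter-block XOR h[0] ^= 0x01010000 ^ (kk << 8) ^ nn to an unkeyed (kk = 0), full-length (nn = 32) digest, which is why kk_shift_8 is the constant 0. A generic sketch of that one step (names assumed, not from the sources):

#include <stdint.h>

/* BLAKE2s h[0] initialization for key length kk (0..32) and digest
   length nn (1..32); iv0 is the first word of the BLAKE2s IV. */
static uint32_t blake2s_h0(uint32_t iv0, uint32_t kk, uint32_t nn)
{
  return iv0 ^ ((uint32_t)0x01010000U ^ ((kk << 8U) ^ nn));
}

The deleted code computes exactly blake2s_h0(iv0, 0U, 32U); the other seven state words are the IV unchanged.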
Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - uint32_t uu____0 = m_w[s2]; - uint32_t uu____1 = m_w[s4]; - uint32_t uu____2 = m_w[s6]; - r0[0U] = m_w[s0]; - r0[1U] = uu____0; - r0[2U] = uu____1; - r0[3U] = uu____2; - uint32_t uu____3 = m_w[s3]; - uint32_t uu____4 = m_w[s5]; - uint32_t uu____5 = m_w[s7]; - r1[0U] = m_w[s1]; - r1[1U] = uu____3; - r1[2U] = uu____4; - r1[3U] = uu____5; - uint32_t uu____6 = m_w[s10]; - uint32_t uu____7 = m_w[s12]; - uint32_t uu____8 = m_w[s14]; - r21[0U] = m_w[s8]; - r21[1U] = uu____6; - r21[2U] = uu____7; - r21[3U] = uu____8; - uint32_t uu____9 = m_w[s11]; - uint32_t uu____10 = m_w[s13]; - uint32_t uu____11 = m_w[s15]; - r31[0U] = m_w[s9]; - r31[1U] = uu____9; - r31[2U] = uu____10; - r31[3U] = uu____11; - uint32_t *x = m_st; - uint32_t *y = m_st + (uint32_t)4U; - uint32_t *z = m_st + (uint32_t)8U; - uint32_t *w = m_st + (uint32_t)12U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - uint32_t *wv_a0 = wv + a * (uint32_t)4U; - uint32_t *wv_b0 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + wv_b0[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + x[i]; - os[i] = x1;); - uint32_t *wv_a1 = wv + d0 * (uint32_t)4U; - uint32_t *wv_b1 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a1; - uint32_t x1 = wv_a1[i] ^ wv_b1[i]; - os[i] = x1;); - uint32_t *r12 = wv_a1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r12; - uint32_t x1 = r12[i]; - uint32_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U; - os[i] = x10;); - uint32_t *wv_a2 = wv + c0 * (uint32_t)4U; - uint32_t *wv_b2 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a2; - uint32_t x1 = wv_a2[i] + wv_b2[i]; - os[i] = x1;); - uint32_t *wv_a3 = wv + b0 * (uint32_t)4U; - uint32_t *wv_b3 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a3; - uint32_t x1 = wv_a3[i] ^ wv_b3[i]; - os[i] = x1;); - uint32_t *r13 = wv_a3; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r13; - uint32_t x1 = r13[i]; - uint32_t x10 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U; - os[i] = x10;); - uint32_t *wv_a4 = wv + a * (uint32_t)4U; - uint32_t *wv_b4 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + wv_b4[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + y[i]; - os[i] = 
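Each round of update_blake2s_32 first gathers the 16 message words into a round-local block m_st through the BLAKE2 sigma permutation; the interleaved row layout (s0,s2,s4,s6 / s1,s3,s5,s7 / ...) matches the four-lane column/diagonal evaluation that follows. A scalar sketch of the gather, assuming the 10-row-by-16 sigma table layout used above:

#include <stdint.h>

/* Gather message words m_w into the row layout expected by the 4-lane
   G steps, for a given round (sigma has 10 rows of 16 indices). */
static void gather_round_words(uint32_t m_st[16], const uint32_t m_w[16],
                               const uint32_t *sigma, uint32_t round)
{
  const uint32_t *s = sigma + (round % 10U) * 16U;
  for (uint32_t j = 0U; j < 4U; j++)
  {
    m_st[j]       = m_w[s[2U * j]];       /* row x: s0 s2 s4 s6    */
    m_st[4U + j]  = m_w[s[2U * j + 1U]];  /* row y: s1 s3 s5 s7    */
    m_st[8U + j]  = m_w[s[8U + 2U * j]];  /* row z: s8 s10 s12 s14 */
    m_st[12U + j] = m_w[s[9U + 2U * j]];  /* row w: s9 s11 s13 s15 */
  }
}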
x1;); - uint32_t *wv_a5 = wv + d0 * (uint32_t)4U; - uint32_t *wv_b5 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a5; - uint32_t x1 = wv_a5[i] ^ wv_b5[i]; - os[i] = x1;); - uint32_t *r14 = wv_a5; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r14; - uint32_t x1 = r14[i]; - uint32_t x10 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U; - os[i] = x10;); - uint32_t *wv_a6 = wv + c0 * (uint32_t)4U; - uint32_t *wv_b6 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a6; - uint32_t x1 = wv_a6[i] + wv_b6[i]; - os[i] = x1;); - uint32_t *wv_a7 = wv + b0 * (uint32_t)4U; - uint32_t *wv_b7 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a7; - uint32_t x1 = wv_a7[i] ^ wv_b7[i]; - os[i] = x1;); - uint32_t *r15 = wv_a7; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r15; - uint32_t x1 = r15[i]; - uint32_t x10 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U; - os[i] = x10;); - uint32_t *r16 = wv + (uint32_t)4U; - uint32_t *r22 = wv + (uint32_t)8U; - uint32_t *r32 = wv + (uint32_t)12U; - uint32_t *r110 = r16; - uint32_t x00 = r110[1U]; - uint32_t x10 = r110[2U]; - uint32_t x20 = r110[3U]; - uint32_t x30 = r110[0U]; - r110[0U] = x00; - r110[1U] = x10; - r110[2U] = x20; - r110[3U] = x30; - uint32_t *r111 = r22; - uint32_t x01 = r111[2U]; - uint32_t x11 = r111[3U]; - uint32_t x21 = r111[0U]; - uint32_t x31 = r111[1U]; - r111[0U] = x01; - r111[1U] = x11; - r111[2U] = x21; - r111[3U] = x31; - uint32_t *r112 = r32; - uint32_t x02 = r112[3U]; - uint32_t x12 = r112[0U]; - uint32_t x22 = r112[1U]; - uint32_t x32 = r112[2U]; - r112[0U] = x02; - r112[1U] = x12; - r112[2U] = x22; - r112[3U] = x32; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - uint32_t *wv_a = wv + a0 * (uint32_t)4U; - uint32_t *wv_b8 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + wv_b8[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + z[i]; - os[i] = x1;); - uint32_t *wv_a8 = wv + d * (uint32_t)4U; - uint32_t *wv_b9 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a8; - uint32_t x1 = wv_a8[i] ^ wv_b9[i]; - os[i] = x1;); - uint32_t *r17 = wv_a8; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r17; - uint32_t x1 = r17[i]; - uint32_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U; - os[i] = x13;); - uint32_t *wv_a9 = wv + c * (uint32_t)4U; - uint32_t *wv_b10 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a9; - uint32_t x1 = wv_a9[i] + wv_b10[i]; - os[i] = x1;); - uint32_t *wv_a10 = wv + b * (uint32_t)4U; - uint32_t *wv_b11 = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a10; - uint32_t x1 = wv_a10[i] ^ wv_b11[i]; - os[i] = x1;); - uint32_t *r18 = wv_a10; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r18; - uint32_t x1 = r18[i]; - uint32_t x13 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U; - os[i] = x13;); - uint32_t *wv_a11 = wv + a0 * (uint32_t)4U; - uint32_t *wv_b12 = 
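The unrolled KRML_MAYBE_FOR4 blocks above evaluate the BLAKE2s mixing function G on four lanes at once; the rotation amounts 16, 12, 8 and 7 show up as the shift pairs x >> r | x << (32 - r). A single-lane reference sketch (assumed names, not from the sources):

#include <stdint.h>

static uint32_t rotr32(uint32_t x, uint32_t r)
{
  return x >> r | x << (32U - r); /* r is always in 1..31 here */
}

/* One BLAKE2s G evaluation on working-vector words a,b,c,d with
   message words mx, my. */
static void blake2s_g(uint32_t v[16], uint32_t a, uint32_t b, uint32_t c,
                      uint32_t d, uint32_t mx, uint32_t my)
{
  v[a] = v[a] + v[b] + mx; v[d] = rotr32(v[d] ^ v[a], 16U);
  v[c] = v[c] + v[d];      v[b] = rotr32(v[b] ^ v[c], 12U);
  v[a] = v[a] + v[b] + my; v[d] = rotr32(v[d] ^ v[a], 8U);
  v[c] = v[c] + v[d];      v[b] = rotr32(v[b] ^ v[c], 7U);
}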
wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a11; - uint32_t x1 = wv_a11[i] + wv_b12[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a11; - uint32_t x1 = wv_a11[i] + w[i]; - os[i] = x1;); - uint32_t *wv_a12 = wv + d * (uint32_t)4U; - uint32_t *wv_b13 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a12; - uint32_t x1 = wv_a12[i] ^ wv_b13[i]; - os[i] = x1;); - uint32_t *r19 = wv_a12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r19; - uint32_t x1 = r19[i]; - uint32_t x13 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U; - os[i] = x13;); - uint32_t *wv_a13 = wv + c * (uint32_t)4U; - uint32_t *wv_b14 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a13; - uint32_t x1 = wv_a13[i] + wv_b14[i]; - os[i] = x1;); - uint32_t *wv_a14 = wv + b * (uint32_t)4U; - uint32_t *wv_b = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a14; - uint32_t x1 = wv_a14[i] ^ wv_b[i]; - os[i] = x1;); - uint32_t *r113 = wv_a14; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r113; - uint32_t x1 = r113[i]; - uint32_t x13 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U; - os[i] = x13;); - uint32_t *r114 = wv + (uint32_t)4U; - uint32_t *r2 = wv + (uint32_t)8U; - uint32_t *r3 = wv + (uint32_t)12U; - uint32_t *r11 = r114; - uint32_t x03 = r11[3U]; - uint32_t x13 = r11[0U]; - uint32_t x23 = r11[1U]; - uint32_t x33 = r11[2U]; - r11[0U] = x03; - r11[1U] = x13; - r11[2U] = x23; - r11[3U] = x33; - uint32_t *r115 = r2; - uint32_t x04 = r115[2U]; - uint32_t x14 = r115[3U]; - uint32_t x24 = r115[0U]; - uint32_t x34 = r115[1U]; - r115[0U] = x04; - r115[1U] = x14; - r115[2U] = x24; - r115[3U] = x34; - uint32_t *r116 = r3; - uint32_t x0 = r116[1U]; - uint32_t x1 = r116[2U]; - uint32_t x2 = r116[3U]; - uint32_t x3 = r116[0U]; - r116[0U] = x0; - r116[1U] = x1; - r116[2U] = x2; - r116[3U] = x3;); - s00 = s; - s16 = s + (uint32_t)4U; - r00 = wv; - r10 = wv + (uint32_t)4U; - r20 = wv + (uint32_t)8U; - r30 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s00; - uint32_t x = s00[i] ^ r00[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s00; - uint32_t x = s00[i] ^ r20[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s16; - uint32_t x = s16[i] ^ r10[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s16; - uint32_t x = s16[i] ^ r30[i]; - os[i] = x;); - return totlen1; - } -} - -void Hacl_Hash_Core_Blake2_finish_blake2s_32(uint32_t *s, uint64_t ev, uint8_t *dst) -{ - uint32_t double_row = (uint32_t)32U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + (uint32_t)16U; - uint32_t *row0 = s; - uint32_t *row1 = s + (uint32_t)4U; - uint8_t *final; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store32_le(first + i * (uint32_t)4U, row0[i]);); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store32_le(second + i * (uint32_t)4U, row1[i]);); - final = b; - memcpy(dst, final, 
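update_blake2s_32 ends with the BLAKE2 feed-forward: each of the eight state words absorbs the two corresponding halves of the working vector, h[i] ^= v[i] ^ v[i+8]. The four XOR loops above are exactly that, split over the two state rows. Compact sketch:

#include <stdint.h>

/* BLAKE2 feed-forward: fold the 16-word working vector wv back into
   the 8-word state s (stored in the first half of the 16-word row
   layout used by the code above). */
static void blake2s_feed_forward(uint32_t s[16], const uint32_t wv[16])
{
  for (uint32_t i = 0U; i < 8U; i++)
    s[i] = s[i] ^ wv[i] ^ wv[8U + i];
}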
(uint32_t)32U * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -FStar_UInt128_uint128 Hacl_Hash_Core_Blake2_init_blake2b_32(uint64_t *s) -{ - uint64_t *r0 = s; - uint64_t *r1 = s + (uint32_t)4U; - uint64_t *r2 = s + (uint32_t)8U; - uint64_t *r3 = s + (uint32_t)12U; - uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U]; - uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U]; - uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U]; - uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U]; - uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U]; - uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U]; - uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U]; - uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U]; - uint64_t kk_shift_8; - uint64_t iv0_; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = (uint64_t)(uint32_t)0U << (uint32_t)8U; - iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)(uint32_t)64U)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; - return FStar_UInt128_uint64_to_uint128((uint64_t)0U); -} - -FStar_UInt128_uint128 -Hacl_Hash_Core_Blake2_update_blake2b_32( - uint64_t *s, - FStar_UInt128_uint128 totlen, - uint8_t *block -) -{ - uint64_t wv[16U] = { 0U }; - FStar_UInt128_uint128 - totlen1 = - FStar_UInt128_add_mod(totlen, - FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)); - uint64_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t *os = m_w; - uint8_t *bj = block + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - uint64_t mask[4U] = { 0U }; - uint64_t wv_14 = (uint64_t)0U; - uint64_t wv_15 = (uint64_t)0U; - uint64_t *wv3; - uint64_t *s00; - uint64_t *s16; - uint64_t *r00; - uint64_t *r10; - uint64_t *r20; - uint64_t *r30; - mask[0U] = FStar_UInt128_uint128_to_uint64(totlen1); - mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen1, (uint32_t)64U)); - mask[2U] = wv_14; - mask[3U] = wv_15; - memcpy(wv, s, (uint32_t)16U * sizeof (uint64_t)); - wv3 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv3; - uint64_t x = wv3[i] ^ mask[i]; - os[i] = x;); - KRML_MAYBE_FOR12(i0, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U; - uint64_t m_st[16U] = { 0U }; - uint64_t *r0 = m_st; - uint64_t *r1 = m_st + (uint32_t)4U; - uint64_t *r21 = m_st + (uint32_t)8U; - uint64_t *r31 = m_st + (uint32_t)12U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; 
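BLAKE2b's message counter t is 128 bits, so the generated C threads it through as FStar_UInt128_uint128, splits it into two 64-bit counter words for the mask, and runs 12 rounds that index the 10-row sigma table modulo 10 (the KRML_MAYBE_FOR12 with i0 % 10 above). A sketch of the counter handling with an assumed two-word representation (on compilers with unsigned __int128 the krml headers use the native type instead):

#include <stdint.h>

typedef struct { uint64_t low; uint64_t high; } u128; /* assumed layout */

/* 128-bit add of a 64-bit block-length increment, with carry. */
static u128 u128_add64(u128 t, uint64_t n)
{
  u128 r;
  r.low = t.low + n;
  r.high = t.high + (r.low < t.low ? 1U : 0U);
  return r;
}

/* The two counter words XORed into v[12] and v[13] (mask[0] and
   mask[1] in the code above). */
static void counter_words(uint64_t mask[2], u128 totlen)
{
  mask[0] = totlen.low;
  mask[1] = totlen.high;
}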
- uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - uint64_t uu____0 = m_w[s2]; - uint64_t uu____1 = m_w[s4]; - uint64_t uu____2 = m_w[s6]; - r0[0U] = m_w[s0]; - r0[1U] = uu____0; - r0[2U] = uu____1; - r0[3U] = uu____2; - uint64_t uu____3 = m_w[s3]; - uint64_t uu____4 = m_w[s5]; - uint64_t uu____5 = m_w[s7]; - r1[0U] = m_w[s1]; - r1[1U] = uu____3; - r1[2U] = uu____4; - r1[3U] = uu____5; - uint64_t uu____6 = m_w[s10]; - uint64_t uu____7 = m_w[s12]; - uint64_t uu____8 = m_w[s14]; - r21[0U] = m_w[s8]; - r21[1U] = uu____6; - r21[2U] = uu____7; - r21[3U] = uu____8; - uint64_t uu____9 = m_w[s11]; - uint64_t uu____10 = m_w[s13]; - uint64_t uu____11 = m_w[s15]; - r31[0U] = m_w[s9]; - r31[1U] = uu____9; - r31[2U] = uu____10; - r31[3U] = uu____11; - uint64_t *x = m_st; - uint64_t *y = m_st + (uint32_t)4U; - uint64_t *z = m_st + (uint32_t)8U; - uint64_t *w = m_st + (uint32_t)12U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - uint64_t *wv_a0 = wv + a * (uint32_t)4U; - uint64_t *wv_b0 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + wv_b0[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + x[i]; - os[i] = x1;); - uint64_t *wv_a1 = wv + d0 * (uint32_t)4U; - uint64_t *wv_b1 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a1; - uint64_t x1 = wv_a1[i] ^ wv_b1[i]; - os[i] = x1;); - uint64_t *r12 = wv_a1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r12; - uint64_t x1 = r12[i]; - uint64_t x10 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U; - os[i] = x10;); - uint64_t *wv_a2 = wv + c0 * (uint32_t)4U; - uint64_t *wv_b2 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a2; - uint64_t x1 = wv_a2[i] + wv_b2[i]; - os[i] = x1;); - uint64_t *wv_a3 = wv + b0 * (uint32_t)4U; - uint64_t *wv_b3 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a3; - uint64_t x1 = wv_a3[i] ^ wv_b3[i]; - os[i] = x1;); - uint64_t *r13 = wv_a3; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r13; - uint64_t x1 = r13[i]; - uint64_t x10 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U; - os[i] = x10;); - uint64_t *wv_a4 = wv + a * (uint32_t)4U; - uint64_t *wv_b4 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + wv_b4[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + y[i]; - os[i] = x1;); - uint64_t *wv_a5 = wv + d0 * (uint32_t)4U; - uint64_t *wv_b5 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a5; - uint64_t x1 = wv_a5[i] ^ wv_b5[i]; - 
os[i] = x1;); - uint64_t *r14 = wv_a5; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r14; - uint64_t x1 = r14[i]; - uint64_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U; - os[i] = x10;); - uint64_t *wv_a6 = wv + c0 * (uint32_t)4U; - uint64_t *wv_b6 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a6; - uint64_t x1 = wv_a6[i] + wv_b6[i]; - os[i] = x1;); - uint64_t *wv_a7 = wv + b0 * (uint32_t)4U; - uint64_t *wv_b7 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a7; - uint64_t x1 = wv_a7[i] ^ wv_b7[i]; - os[i] = x1;); - uint64_t *r15 = wv_a7; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r15; - uint64_t x1 = r15[i]; - uint64_t x10 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U; - os[i] = x10;); - uint64_t *r16 = wv + (uint32_t)4U; - uint64_t *r22 = wv + (uint32_t)8U; - uint64_t *r32 = wv + (uint32_t)12U; - uint64_t *r110 = r16; - uint64_t x00 = r110[1U]; - uint64_t x10 = r110[2U]; - uint64_t x20 = r110[3U]; - uint64_t x30 = r110[0U]; - r110[0U] = x00; - r110[1U] = x10; - r110[2U] = x20; - r110[3U] = x30; - uint64_t *r111 = r22; - uint64_t x01 = r111[2U]; - uint64_t x11 = r111[3U]; - uint64_t x21 = r111[0U]; - uint64_t x31 = r111[1U]; - r111[0U] = x01; - r111[1U] = x11; - r111[2U] = x21; - r111[3U] = x31; - uint64_t *r112 = r32; - uint64_t x02 = r112[3U]; - uint64_t x12 = r112[0U]; - uint64_t x22 = r112[1U]; - uint64_t x32 = r112[2U]; - r112[0U] = x02; - r112[1U] = x12; - r112[2U] = x22; - r112[3U] = x32; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - uint64_t *wv_a = wv + a0 * (uint32_t)4U; - uint64_t *wv_b8 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + wv_b8[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + z[i]; - os[i] = x1;); - uint64_t *wv_a8 = wv + d * (uint32_t)4U; - uint64_t *wv_b9 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a8; - uint64_t x1 = wv_a8[i] ^ wv_b9[i]; - os[i] = x1;); - uint64_t *r17 = wv_a8; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r17; - uint64_t x1 = r17[i]; - uint64_t x13 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U; - os[i] = x13;); - uint64_t *wv_a9 = wv + c * (uint32_t)4U; - uint64_t *wv_b10 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a9; - uint64_t x1 = wv_a9[i] + wv_b10[i]; - os[i] = x1;); - uint64_t *wv_a10 = wv + b * (uint32_t)4U; - uint64_t *wv_b11 = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a10; - uint64_t x1 = wv_a10[i] ^ wv_b11[i]; - os[i] = x1;); - uint64_t *r18 = wv_a10; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r18; - uint64_t x1 = r18[i]; - uint64_t x13 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U; - os[i] = x13;); - uint64_t *wv_a11 = wv + a0 * (uint32_t)4U; - uint64_t *wv_b12 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a11; - uint64_t x1 = wv_a11[i] + wv_b12[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - 
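The 64-bit variant uses the same mixing schedule with rotation amounts 32, 24, 16 and 63, visible above as the shift pairs x >> r | x << (64 - r). Single-lane sketch, mirroring the BLAKE2s one:

#include <stdint.h>

static uint64_t rotr64(uint64_t x, uint32_t r)
{
  return x >> r | x << (64U - r); /* r is always in 1..63 here */
}

/* One BLAKE2b G evaluation on working-vector words a,b,c,d with
   message words mx, my. */
static void blake2b_g(uint64_t v[16], uint32_t a, uint32_t b, uint32_t c,
                      uint32_t d, uint64_t mx, uint64_t my)
{
  v[a] = v[a] + v[b] + mx; v[d] = rotr64(v[d] ^ v[a], 32U);
  v[c] = v[c] + v[d];      v[b] = rotr64(v[b] ^ v[c], 24U);
  v[a] = v[a] + v[b] + my; v[d] = rotr64(v[d] ^ v[a], 16U);
  v[c] = v[c] + v[d];      v[b] = rotr64(v[b] ^ v[c], 63U);
}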
(uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a11; - uint64_t x1 = wv_a11[i] + w[i]; - os[i] = x1;); - uint64_t *wv_a12 = wv + d * (uint32_t)4U; - uint64_t *wv_b13 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a12; - uint64_t x1 = wv_a12[i] ^ wv_b13[i]; - os[i] = x1;); - uint64_t *r19 = wv_a12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r19; - uint64_t x1 = r19[i]; - uint64_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U; - os[i] = x13;); - uint64_t *wv_a13 = wv + c * (uint32_t)4U; - uint64_t *wv_b14 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a13; - uint64_t x1 = wv_a13[i] + wv_b14[i]; - os[i] = x1;); - uint64_t *wv_a14 = wv + b * (uint32_t)4U; - uint64_t *wv_b = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a14; - uint64_t x1 = wv_a14[i] ^ wv_b[i]; - os[i] = x1;); - uint64_t *r113 = wv_a14; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r113; - uint64_t x1 = r113[i]; - uint64_t x13 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U; - os[i] = x13;); - uint64_t *r114 = wv + (uint32_t)4U; - uint64_t *r2 = wv + (uint32_t)8U; - uint64_t *r3 = wv + (uint32_t)12U; - uint64_t *r11 = r114; - uint64_t x03 = r11[3U]; - uint64_t x13 = r11[0U]; - uint64_t x23 = r11[1U]; - uint64_t x33 = r11[2U]; - r11[0U] = x03; - r11[1U] = x13; - r11[2U] = x23; - r11[3U] = x33; - uint64_t *r115 = r2; - uint64_t x04 = r115[2U]; - uint64_t x14 = r115[3U]; - uint64_t x24 = r115[0U]; - uint64_t x34 = r115[1U]; - r115[0U] = x04; - r115[1U] = x14; - r115[2U] = x24; - r115[3U] = x34; - uint64_t *r116 = r3; - uint64_t x0 = r116[1U]; - uint64_t x1 = r116[2U]; - uint64_t x2 = r116[3U]; - uint64_t x3 = r116[0U]; - r116[0U] = x0; - r116[1U] = x1; - r116[2U] = x2; - r116[3U] = x3;); - s00 = s; - s16 = s + (uint32_t)4U; - r00 = wv; - r10 = wv + (uint32_t)4U; - r20 = wv + (uint32_t)8U; - r30 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s00; - uint64_t x = s00[i] ^ r00[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s00; - uint64_t x = s00[i] ^ r20[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s16; - uint64_t x = s16[i] ^ r10[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s16; - uint64_t x = s16[i] ^ r30[i]; - os[i] = x;); - return totlen1; - } -} - -void -Hacl_Hash_Core_Blake2_finish_blake2b_32(uint64_t *s, FStar_UInt128_uint128 ev, uint8_t *dst) -{ - uint32_t double_row = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + (uint32_t)32U; - uint64_t *row0 = s; - uint64_t *row1 = s + (uint32_t)4U; - uint8_t *final; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(first + i * (uint32_t)8U, row0[i]);); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(second + i * (uint32_t)8U, row1[i]);); - final = b; - memcpy(dst, final, (uint32_t)64U * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -uint64_t -Hacl_Hash_Blake2_update_multi_blake2s_32( - uint32_t *s, - uint64_t ev, - uint8_t 
*blocks, - uint32_t n_blocks -) -{ - { - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)64U; - uint8_t *block = blocks + sz * i; - uint64_t - v_ = - Hacl_Hash_Core_Blake2_update_blake2s_32(s, - ev + (uint64_t)i * (uint64_t)(uint32_t)64U, - block); - } - } - return ev + (uint64_t)n_blocks * (uint64_t)(uint32_t)64U; -} - -FStar_UInt128_uint128 -Hacl_Hash_Blake2_update_multi_blake2b_32( - uint64_t *s, - FStar_UInt128_uint128 ev, - uint8_t *blocks, - uint32_t n_blocks -) -{ - { - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)128U; - uint8_t *block = blocks + sz * i; - FStar_UInt128_uint128 - v_ = - Hacl_Hash_Core_Blake2_update_blake2b_32(s, - FStar_UInt128_add_mod(ev, - FStar_UInt128_uint64_to_uint128((uint64_t)i * (uint64_t)(uint32_t)128U)), - block); - } - } - return - FStar_UInt128_add_mod(ev, - FStar_UInt128_uint64_to_uint128((uint64_t)n_blocks * (uint64_t)(uint32_t)128U)); -} - -uint64_t -Hacl_Hash_Blake2_update_last_blake2s_32( - uint32_t *s, - uint64_t ev, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)64U; - uint32_t blocks_len0 = blocks_n * (uint32_t)64U; - uint32_t rest_len0 = input_len - blocks_len0; - K___uint32_t_uint32_t_uint32_t scrut0; - if (rest_len0 == (uint32_t)0U && blocks_n > (uint32_t)0U) - { - uint32_t blocks_n1 = blocks_n - (uint32_t)1U; - uint32_t blocks_len1 = blocks_len0 - (uint32_t)64U; - uint32_t rest_len1 = (uint32_t)64U; - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n1; - lit.snd = blocks_len1; - lit.thd = rest_len1; - scrut0 = lit; - } - else - { - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n; - lit.snd = blocks_len0; - lit.thd = rest_len0; - scrut0 = lit; - } - { - uint32_t num_blocks0 = scrut0.fst; - uint32_t blocks_len = scrut0.snd; - uint32_t rest_len1 = scrut0.thd; - uint8_t *blocks0 = input; - uint8_t *rest0 = input + blocks_len; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ lit; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ scrut; - uint32_t num_blocks; - uint32_t rest_len; - uint8_t *blocks; - uint8_t *rest; - uint64_t ev_; - lit.fst = num_blocks0; - lit.snd = blocks_len; - lit.thd = rest_len1; - lit.f3 = blocks0; - lit.f4 = rest0; - scrut = lit; - num_blocks = scrut.fst; - rest_len = scrut.thd; - blocks = scrut.f3; - rest = scrut.f4; - ev_ = Hacl_Hash_Blake2_update_multi_blake2s_32(s, ev, blocks, num_blocks); - { - uint32_t wv[16U] = { 0U }; - uint8_t tmp[64U] = { 0U }; - uint8_t *tmp_rest = tmp; - uint64_t totlen; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - totlen = ev_ + (uint64_t)rest_len; - { - uint32_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = m_w; - uint8_t *bj = tmp + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - uint32_t mask[4U] = { 0U }; - uint32_t wv_14 = (uint32_t)0xFFFFFFFFU; - uint32_t wv_15 = (uint32_t)0U; - uint32_t *wv3; - uint32_t *s00; - uint32_t *s16; - uint32_t *r00; - uint32_t *r10; - uint32_t *r20; - uint32_t *r30; - mask[0U] = (uint32_t)totlen; - mask[1U] = (uint32_t)(totlen >> (uint32_t)32U); - mask[2U] = wv_14; - mask[3U] = wv_15; - memcpy(wv, s, (uint32_t)16U * sizeof (uint32_t)); - wv3 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv3; - uint32_t x = wv3[i] ^ mask[i]; - os[i] = x;); - KRML_MAYBE_FOR10(i0, - (uint32_t)0U, - (uint32_t)10U, - (uint32_t)1U, - 
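update_multi above is a plain loop over full blocks (64 bytes for BLAKE2s, 128 for BLAKE2b): the extended-length accumulator ev advances by one block length per iteration, and the per-iteration result v_ is discarded because the final value is recomputed arithmetically. A sketch of the driver shape, abstracting the compression function behind an assumed function-pointer type:

#include <stdint.h>

typedef uint64_t (*compress64_fn)(uint32_t *s, uint64_t totlen, uint8_t *block);

/* Feed n_blocks consecutive 64-byte blocks to a BLAKE2s-style
   compression function, threading the byte counter through. */
static uint64_t update_multi64(uint32_t *s, uint64_t ev, uint8_t *blocks,
                               uint32_t n_blocks, compress64_fn f)
{
  for (uint32_t i = 0U; i < n_blocks; i++)
    (void)f(s, ev + (uint64_t)i * 64U, blocks + 64U * i);
  return ev + (uint64_t)n_blocks * 64U;
}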
uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U; - uint32_t m_st[16U] = { 0U }; - uint32_t *r0 = m_st; - uint32_t *r1 = m_st + (uint32_t)4U; - uint32_t *r21 = m_st + (uint32_t)8U; - uint32_t *r31 = m_st + (uint32_t)12U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - uint32_t uu____0 = m_w[s2]; - uint32_t uu____1 = m_w[s4]; - uint32_t uu____2 = m_w[s6]; - r0[0U] = m_w[s0]; - r0[1U] = uu____0; - r0[2U] = uu____1; - r0[3U] = uu____2; - uint32_t uu____3 = m_w[s3]; - uint32_t uu____4 = m_w[s5]; - uint32_t uu____5 = m_w[s7]; - r1[0U] = m_w[s1]; - r1[1U] = uu____3; - r1[2U] = uu____4; - r1[3U] = uu____5; - uint32_t uu____6 = m_w[s10]; - uint32_t uu____7 = m_w[s12]; - uint32_t uu____8 = m_w[s14]; - r21[0U] = m_w[s8]; - r21[1U] = uu____6; - r21[2U] = uu____7; - r21[3U] = uu____8; - uint32_t uu____9 = m_w[s11]; - uint32_t uu____10 = m_w[s13]; - uint32_t uu____11 = m_w[s15]; - r31[0U] = m_w[s9]; - r31[1U] = uu____9; - r31[2U] = uu____10; - r31[3U] = uu____11; - uint32_t *x = m_st; - uint32_t *y = m_st + (uint32_t)4U; - uint32_t *z = m_st + (uint32_t)8U; - uint32_t *w = m_st + (uint32_t)12U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - uint32_t *wv_a0 = wv + a * (uint32_t)4U; - uint32_t *wv_b0 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + wv_b0[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + x[i]; - os[i] = x1;); - uint32_t *wv_a1 = wv + d0 * (uint32_t)4U; - uint32_t *wv_b1 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a1; - uint32_t x1 = wv_a1[i] ^ wv_b1[i]; - os[i] = x1;); - uint32_t *r12 = wv_a1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r12; - uint32_t x1 = r12[i]; - uint32_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U; - os[i] = x10;); - uint32_t *wv_a2 = wv + c0 * (uint32_t)4U; - uint32_t *wv_b2 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a2; - uint32_t x1 = wv_a2[i] + wv_b2[i]; - os[i] = x1;); - 
uint32_t *wv_a3 = wv + b0 * (uint32_t)4U; - uint32_t *wv_b3 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a3; - uint32_t x1 = wv_a3[i] ^ wv_b3[i]; - os[i] = x1;); - uint32_t *r13 = wv_a3; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r13; - uint32_t x1 = r13[i]; - uint32_t x10 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U; - os[i] = x10;); - uint32_t *wv_a4 = wv + a * (uint32_t)4U; - uint32_t *wv_b4 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + wv_b4[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + y[i]; - os[i] = x1;); - uint32_t *wv_a5 = wv + d0 * (uint32_t)4U; - uint32_t *wv_b5 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a5; - uint32_t x1 = wv_a5[i] ^ wv_b5[i]; - os[i] = x1;); - uint32_t *r14 = wv_a5; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r14; - uint32_t x1 = r14[i]; - uint32_t x10 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U; - os[i] = x10;); - uint32_t *wv_a6 = wv + c0 * (uint32_t)4U; - uint32_t *wv_b6 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a6; - uint32_t x1 = wv_a6[i] + wv_b6[i]; - os[i] = x1;); - uint32_t *wv_a7 = wv + b0 * (uint32_t)4U; - uint32_t *wv_b7 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a7; - uint32_t x1 = wv_a7[i] ^ wv_b7[i]; - os[i] = x1;); - uint32_t *r15 = wv_a7; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r15; - uint32_t x1 = r15[i]; - uint32_t x10 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U; - os[i] = x10;); - uint32_t *r16 = wv + (uint32_t)4U; - uint32_t *r22 = wv + (uint32_t)8U; - uint32_t *r32 = wv + (uint32_t)12U; - uint32_t *r110 = r16; - uint32_t x00 = r110[1U]; - uint32_t x10 = r110[2U]; - uint32_t x20 = r110[3U]; - uint32_t x30 = r110[0U]; - r110[0U] = x00; - r110[1U] = x10; - r110[2U] = x20; - r110[3U] = x30; - uint32_t *r111 = r22; - uint32_t x01 = r111[2U]; - uint32_t x11 = r111[3U]; - uint32_t x21 = r111[0U]; - uint32_t x31 = r111[1U]; - r111[0U] = x01; - r111[1U] = x11; - r111[2U] = x21; - r111[3U] = x31; - uint32_t *r112 = r32; - uint32_t x02 = r112[3U]; - uint32_t x12 = r112[0U]; - uint32_t x22 = r112[1U]; - uint32_t x32 = r112[2U]; - r112[0U] = x02; - r112[1U] = x12; - r112[2U] = x22; - r112[3U] = x32; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - uint32_t *wv_a = wv + a0 * (uint32_t)4U; - uint32_t *wv_b8 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + wv_b8[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + z[i]; - os[i] = x1;); - uint32_t *wv_a8 = wv + d * (uint32_t)4U; - uint32_t *wv_b9 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a8; - uint32_t x1 = wv_a8[i] ^ wv_b9[i]; - os[i] = x1;); - uint32_t *r17 = wv_a8; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r17; - uint32_t x1 = r17[i]; - uint32_t 
x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U; - os[i] = x13;); - uint32_t *wv_a9 = wv + c * (uint32_t)4U; - uint32_t *wv_b10 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a9; - uint32_t x1 = wv_a9[i] + wv_b10[i]; - os[i] = x1;); - uint32_t *wv_a10 = wv + b * (uint32_t)4U; - uint32_t *wv_b11 = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a10; - uint32_t x1 = wv_a10[i] ^ wv_b11[i]; - os[i] = x1;); - uint32_t *r18 = wv_a10; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r18; - uint32_t x1 = r18[i]; - uint32_t x13 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U; - os[i] = x13;); - uint32_t *wv_a11 = wv + a0 * (uint32_t)4U; - uint32_t *wv_b12 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a11; - uint32_t x1 = wv_a11[i] + wv_b12[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a11; - uint32_t x1 = wv_a11[i] + w[i]; - os[i] = x1;); - uint32_t *wv_a12 = wv + d * (uint32_t)4U; - uint32_t *wv_b13 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a12; - uint32_t x1 = wv_a12[i] ^ wv_b13[i]; - os[i] = x1;); - uint32_t *r19 = wv_a12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r19; - uint32_t x1 = r19[i]; - uint32_t x13 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U; - os[i] = x13;); - uint32_t *wv_a13 = wv + c * (uint32_t)4U; - uint32_t *wv_b14 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a13; - uint32_t x1 = wv_a13[i] + wv_b14[i]; - os[i] = x1;); - uint32_t *wv_a14 = wv + b * (uint32_t)4U; - uint32_t *wv_b = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a14; - uint32_t x1 = wv_a14[i] ^ wv_b[i]; - os[i] = x1;); - uint32_t *r113 = wv_a14; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r113; - uint32_t x1 = r113[i]; - uint32_t x13 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U; - os[i] = x13;); - uint32_t *r114 = wv + (uint32_t)4U; - uint32_t *r2 = wv + (uint32_t)8U; - uint32_t *r3 = wv + (uint32_t)12U; - uint32_t *r11 = r114; - uint32_t x03 = r11[3U]; - uint32_t x13 = r11[0U]; - uint32_t x23 = r11[1U]; - uint32_t x33 = r11[2U]; - r11[0U] = x03; - r11[1U] = x13; - r11[2U] = x23; - r11[3U] = x33; - uint32_t *r115 = r2; - uint32_t x04 = r115[2U]; - uint32_t x14 = r115[3U]; - uint32_t x24 = r115[0U]; - uint32_t x34 = r115[1U]; - r115[0U] = x04; - r115[1U] = x14; - r115[2U] = x24; - r115[3U] = x34; - uint32_t *r116 = r3; - uint32_t x0 = r116[1U]; - uint32_t x1 = r116[2U]; - uint32_t x2 = r116[3U]; - uint32_t x3 = r116[0U]; - r116[0U] = x0; - r116[1U] = x1; - r116[2U] = x2; - r116[3U] = x3;); - s00 = s; - s16 = s + (uint32_t)4U; - r00 = wv; - r10 = wv + (uint32_t)4U; - r20 = wv + (uint32_t)8U; - r30 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s00; - uint32_t x = s00[i] ^ r00[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s00; - uint32_t x = s00[i] ^ r20[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s16; - uint32_t x = s16[i] ^ r10[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, 
- (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s16; - uint32_t x = s16[i] ^ r30[i]; - os[i] = x;); - return (uint64_t)0U; - } - } - } - } -} - -FStar_UInt128_uint128 -Hacl_Hash_Blake2_update_last_blake2b_32( - uint64_t *s, - FStar_UInt128_uint128 ev, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)128U; - uint32_t blocks_len0 = blocks_n * (uint32_t)128U; - uint32_t rest_len0 = input_len - blocks_len0; - K___uint32_t_uint32_t_uint32_t scrut0; - if (rest_len0 == (uint32_t)0U && blocks_n > (uint32_t)0U) - { - uint32_t blocks_n1 = blocks_n - (uint32_t)1U; - uint32_t blocks_len1 = blocks_len0 - (uint32_t)128U; - uint32_t rest_len1 = (uint32_t)128U; - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n1; - lit.snd = blocks_len1; - lit.thd = rest_len1; - scrut0 = lit; - } - else - { - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n; - lit.snd = blocks_len0; - lit.thd = rest_len0; - scrut0 = lit; - } - { - uint32_t num_blocks0 = scrut0.fst; - uint32_t blocks_len = scrut0.snd; - uint32_t rest_len1 = scrut0.thd; - uint8_t *blocks0 = input; - uint8_t *rest0 = input + blocks_len; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ lit; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ scrut; - uint32_t num_blocks; - uint32_t rest_len; - uint8_t *blocks; - uint8_t *rest; - FStar_UInt128_uint128 ev_; - lit.fst = num_blocks0; - lit.snd = blocks_len; - lit.thd = rest_len1; - lit.f3 = blocks0; - lit.f4 = rest0; - scrut = lit; - num_blocks = scrut.fst; - rest_len = scrut.thd; - blocks = scrut.f3; - rest = scrut.f4; - ev_ = Hacl_Hash_Blake2_update_multi_blake2b_32(s, ev, blocks, num_blocks); - { - uint64_t wv[16U] = { 0U }; - uint8_t tmp[128U] = { 0U }; - uint8_t *tmp_rest = tmp; - FStar_UInt128_uint128 totlen; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - totlen = FStar_UInt128_add_mod(ev_, FStar_UInt128_uint64_to_uint128((uint64_t)rest_len)); - { - uint64_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t *os = m_w; - uint8_t *bj = tmp + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - uint64_t mask[4U] = { 0U }; - uint64_t wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t wv_15 = (uint64_t)0U; - uint64_t *wv3; - uint64_t *s00; - uint64_t *s16; - uint64_t *r00; - uint64_t *r10; - uint64_t *r20; - uint64_t *r30; - mask[0U] = FStar_UInt128_uint128_to_uint64(totlen); - mask[1U] = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)); - mask[2U] = wv_14; - mask[3U] = wv_15; - memcpy(wv, s, (uint32_t)16U * sizeof (uint64_t)); - wv3 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv3; - uint64_t x = wv3[i] ^ mask[i]; - os[i] = x;); - KRML_MAYBE_FOR12(i0, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U; - uint64_t m_st[16U] = { 0U }; - uint64_t *r0 = m_st; - uint64_t *r1 = m_st + (uint32_t)4U; - uint64_t *r21 = m_st + (uint32_t)8U; - uint64_t *r31 = m_st + (uint32_t)12U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = 
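Both update_last variants begin by splitting the trailing input: when the input is a non-empty multiple of the block size, one full block is held back so the final (flagged, length-padded) compression always has data to process; that is the rest_len0 == 0 && blocks_n > 0 branch above. Standalone sketch of the split:

#include <stdint.h>

/* Split input_len into full blocks for update_multi and a non-empty
   remainder for the padded last block (block_len is 64 for BLAKE2s,
   128 for BLAKE2b). */
static void split_last(uint32_t input_len, uint32_t block_len,
                       uint32_t *n_blocks, uint32_t *rest_len)
{
  uint32_t n = input_len / block_len;
  uint32_t rest = input_len - n * block_len;
  if (rest == 0U && n > 0U)
  {
    n = n - 1U;
    rest = block_len;
  }
  *n_blocks = n;
  *rest_len = rest;
}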
Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - uint64_t uu____0 = m_w[s2]; - uint64_t uu____1 = m_w[s4]; - uint64_t uu____2 = m_w[s6]; - r0[0U] = m_w[s0]; - r0[1U] = uu____0; - r0[2U] = uu____1; - r0[3U] = uu____2; - uint64_t uu____3 = m_w[s3]; - uint64_t uu____4 = m_w[s5]; - uint64_t uu____5 = m_w[s7]; - r1[0U] = m_w[s1]; - r1[1U] = uu____3; - r1[2U] = uu____4; - r1[3U] = uu____5; - uint64_t uu____6 = m_w[s10]; - uint64_t uu____7 = m_w[s12]; - uint64_t uu____8 = m_w[s14]; - r21[0U] = m_w[s8]; - r21[1U] = uu____6; - r21[2U] = uu____7; - r21[3U] = uu____8; - uint64_t uu____9 = m_w[s11]; - uint64_t uu____10 = m_w[s13]; - uint64_t uu____11 = m_w[s15]; - r31[0U] = m_w[s9]; - r31[1U] = uu____9; - r31[2U] = uu____10; - r31[3U] = uu____11; - uint64_t *x = m_st; - uint64_t *y = m_st + (uint32_t)4U; - uint64_t *z = m_st + (uint32_t)8U; - uint64_t *w = m_st + (uint32_t)12U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - uint64_t *wv_a0 = wv + a * (uint32_t)4U; - uint64_t *wv_b0 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + wv_b0[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + x[i]; - os[i] = x1;); - uint64_t *wv_a1 = wv + d0 * (uint32_t)4U; - uint64_t *wv_b1 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a1; - uint64_t x1 = wv_a1[i] ^ wv_b1[i]; - os[i] = x1;); - uint64_t *r12 = wv_a1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r12; - uint64_t x1 = r12[i]; - uint64_t x10 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U; - os[i] = x10;); - uint64_t *wv_a2 = wv + c0 * (uint32_t)4U; - uint64_t *wv_b2 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a2; - uint64_t x1 = wv_a2[i] + wv_b2[i]; - os[i] = x1;); - uint64_t *wv_a3 = wv + b0 * (uint32_t)4U; - uint64_t *wv_b3 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a3; - uint64_t x1 = wv_a3[i] ^ wv_b3[i]; - os[i] = x1;); - uint64_t *r13 = wv_a3; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r13; - uint64_t x1 = r13[i]; - uint64_t x10 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U; - os[i] = x10;); - uint64_t *wv_a4 = wv + a * (uint32_t)4U; - uint64_t *wv_b4 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - 
(uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + wv_b4[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + y[i]; - os[i] = x1;); - uint64_t *wv_a5 = wv + d0 * (uint32_t)4U; - uint64_t *wv_b5 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a5; - uint64_t x1 = wv_a5[i] ^ wv_b5[i]; - os[i] = x1;); - uint64_t *r14 = wv_a5; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r14; - uint64_t x1 = r14[i]; - uint64_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U; - os[i] = x10;); - uint64_t *wv_a6 = wv + c0 * (uint32_t)4U; - uint64_t *wv_b6 = wv + d0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a6; - uint64_t x1 = wv_a6[i] + wv_b6[i]; - os[i] = x1;); - uint64_t *wv_a7 = wv + b0 * (uint32_t)4U; - uint64_t *wv_b7 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a7; - uint64_t x1 = wv_a7[i] ^ wv_b7[i]; - os[i] = x1;); - uint64_t *r15 = wv_a7; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r15; - uint64_t x1 = r15[i]; - uint64_t x10 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U; - os[i] = x10;); - uint64_t *r16 = wv + (uint32_t)4U; - uint64_t *r22 = wv + (uint32_t)8U; - uint64_t *r32 = wv + (uint32_t)12U; - uint64_t *r110 = r16; - uint64_t x00 = r110[1U]; - uint64_t x10 = r110[2U]; - uint64_t x20 = r110[3U]; - uint64_t x30 = r110[0U]; - r110[0U] = x00; - r110[1U] = x10; - r110[2U] = x20; - r110[3U] = x30; - uint64_t *r111 = r22; - uint64_t x01 = r111[2U]; - uint64_t x11 = r111[3U]; - uint64_t x21 = r111[0U]; - uint64_t x31 = r111[1U]; - r111[0U] = x01; - r111[1U] = x11; - r111[2U] = x21; - r111[3U] = x31; - uint64_t *r112 = r32; - uint64_t x02 = r112[3U]; - uint64_t x12 = r112[0U]; - uint64_t x22 = r112[1U]; - uint64_t x32 = r112[2U]; - r112[0U] = x02; - r112[1U] = x12; - r112[2U] = x22; - r112[3U] = x32; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - uint64_t *wv_a = wv + a0 * (uint32_t)4U; - uint64_t *wv_b8 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + wv_b8[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + z[i]; - os[i] = x1;); - uint64_t *wv_a8 = wv + d * (uint32_t)4U; - uint64_t *wv_b9 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a8; - uint64_t x1 = wv_a8[i] ^ wv_b9[i]; - os[i] = x1;); - uint64_t *r17 = wv_a8; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r17; - uint64_t x1 = r17[i]; - uint64_t x13 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U; - os[i] = x13;); - uint64_t *wv_a9 = wv + c * (uint32_t)4U; - uint64_t *wv_b10 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a9; - uint64_t x1 = wv_a9[i] + wv_b10[i]; - os[i] = x1;); - uint64_t *wv_a10 = wv + b * (uint32_t)4U; - uint64_t *wv_b11 = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a10; - uint64_t x1 = wv_a10[i] ^ wv_b11[i]; - os[i] = x1;); - uint64_t *r18 = wv_a10; - 
KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r18; - uint64_t x1 = r18[i]; - uint64_t x13 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U; - os[i] = x13;); - uint64_t *wv_a11 = wv + a0 * (uint32_t)4U; - uint64_t *wv_b12 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a11; - uint64_t x1 = wv_a11[i] + wv_b12[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a11; - uint64_t x1 = wv_a11[i] + w[i]; - os[i] = x1;); - uint64_t *wv_a12 = wv + d * (uint32_t)4U; - uint64_t *wv_b13 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a12; - uint64_t x1 = wv_a12[i] ^ wv_b13[i]; - os[i] = x1;); - uint64_t *r19 = wv_a12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r19; - uint64_t x1 = r19[i]; - uint64_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U; - os[i] = x13;); - uint64_t *wv_a13 = wv + c * (uint32_t)4U; - uint64_t *wv_b14 = wv + d * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a13; - uint64_t x1 = wv_a13[i] + wv_b14[i]; - os[i] = x1;); - uint64_t *wv_a14 = wv + b * (uint32_t)4U; - uint64_t *wv_b = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a14; - uint64_t x1 = wv_a14[i] ^ wv_b[i]; - os[i] = x1;); - uint64_t *r113 = wv_a14; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r113; - uint64_t x1 = r113[i]; - uint64_t x13 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U; - os[i] = x13;); - uint64_t *r114 = wv + (uint32_t)4U; - uint64_t *r2 = wv + (uint32_t)8U; - uint64_t *r3 = wv + (uint32_t)12U; - uint64_t *r11 = r114; - uint64_t x03 = r11[3U]; - uint64_t x13 = r11[0U]; - uint64_t x23 = r11[1U]; - uint64_t x33 = r11[2U]; - r11[0U] = x03; - r11[1U] = x13; - r11[2U] = x23; - r11[3U] = x33; - uint64_t *r115 = r2; - uint64_t x04 = r115[2U]; - uint64_t x14 = r115[3U]; - uint64_t x24 = r115[0U]; - uint64_t x34 = r115[1U]; - r115[0U] = x04; - r115[1U] = x14; - r115[2U] = x24; - r115[3U] = x34; - uint64_t *r116 = r3; - uint64_t x0 = r116[1U]; - uint64_t x1 = r116[2U]; - uint64_t x2 = r116[3U]; - uint64_t x3 = r116[0U]; - r116[0U] = x0; - r116[1U] = x1; - r116[2U] = x2; - r116[3U] = x3;); - s00 = s; - s16 = s + (uint32_t)4U; - r00 = wv; - r10 = wv + (uint32_t)4U; - r20 = wv + (uint32_t)8U; - r30 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s00; - uint64_t x = s00[i] ^ r00[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s00; - uint64_t x = s00[i] ^ r20[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s16; - uint64_t x = s16[i] ^ r10[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s16; - uint64_t x = s16[i] ^ r30[i]; - os[i] = x;); - return FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - } - } - } -} - -void Hacl_Hash_Blake2_hash_blake2s_32(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, input_len, input, (uint32_t)0U, NULL); -} - -void Hacl_Hash_Blake2_hash_blake2b_32(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, input_len, input, (uint32_t)0U, NULL); -} - 
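The two one-shot wrappers above call the unkeyed Blake2 implementations with the full digest length (32 bytes for BLAKE2s, 64 for BLAKE2b) and no key. Example use, assuming the deleted distribution's public header is on the include path:

#include <stdint.h>
#include "Hacl_Hash_Blake2.h"

static void digest_example(uint8_t *msg, uint32_t msg_len)
{
  uint8_t d32[32];
  uint8_t d64[64];
  Hacl_Hash_Blake2_hash_blake2s_32(msg, msg_len, d32); /* BLAKE2s-256 */
  Hacl_Hash_Blake2_hash_blake2b_32(msg, msg_len, d64); /* BLAKE2b-512 */
}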
-static void -blake2b_update_block( - uint64_t *wv, - uint64_t *hash, - bool flag, - FStar_UInt128_uint128 totlen, - uint8_t *d -) -{ - uint64_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t *os = m_w; - uint8_t *bj = d + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - uint64_t mask[4U] = { 0U }; - uint64_t wv_14; - if (flag) - { - wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - else - { - wv_14 = (uint64_t)0U; - } - { - uint64_t wv_15 = (uint64_t)0U; - uint64_t *wv3; - uint64_t *s00; - uint64_t *s16; - uint64_t *r00; - uint64_t *r10; - uint64_t *r20; - uint64_t *r30; - mask[0U] = FStar_UInt128_uint128_to_uint64(totlen); - mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)); - mask[2U] = wv_14; - mask[3U] = wv_15; - memcpy(wv, hash, (uint32_t)16U * sizeof (uint64_t)); - wv3 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv3; - uint64_t x = wv3[i] ^ mask[i]; - os[i] = x;); - KRML_MAYBE_FOR12(i0, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U; - uint64_t m_st[16U] = { 0U }; - uint64_t *r0 = m_st; - uint64_t *r1 = m_st + (uint32_t)4U; - uint64_t *r21 = m_st + (uint32_t)8U; - uint64_t *r31 = m_st + (uint32_t)12U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - uint64_t uu____0 = m_w[s2]; - uint64_t uu____1 = m_w[s4]; - uint64_t uu____2 = m_w[s6]; - r0[0U] = m_w[s0]; - r0[1U] = uu____0; - r0[2U] = uu____1; - r0[3U] = uu____2; - uint64_t uu____3 = m_w[s3]; - uint64_t uu____4 = m_w[s5]; - uint64_t uu____5 = m_w[s7]; - r1[0U] = m_w[s1]; - r1[1U] = uu____3; - r1[2U] = uu____4; - r1[3U] = uu____5; - uint64_t uu____6 = m_w[s10]; - uint64_t uu____7 = m_w[s12]; - uint64_t uu____8 = m_w[s14]; - r21[0U] = m_w[s8]; - r21[1U] = uu____6; - r21[2U] = uu____7; - r21[3U] = uu____8; - uint64_t uu____9 = m_w[s11]; - uint64_t uu____10 = m_w[s13]; - uint64_t uu____11 = m_w[s15]; - r31[0U] = m_w[s9]; - r31[1U] = uu____9; - r31[2U] = uu____10; - r31[3U] = uu____11; - uint64_t *x = m_st; - uint64_t *y = m_st + (uint32_t)4U; - uint64_t *z = m_st + (uint32_t)8U; - uint64_t *w = m_st + (uint32_t)12U; - uint32_t a = 
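blake2b_update_block generalizes the core update with a bool flag that selects the finalization word: mask[2] (f0 in RFC 7693 terms) is all ones for the last block and zero otherwise, exactly the wv_14 branch above; it is XORed into v[14] along with the counter words before the rounds. Tiny sketch:

#include <stdint.h>
#include <stdbool.h>

/* Finalization word XORed into v[14] before the rounds. */
static uint64_t blake2b_f0(bool last_block)
{
  return last_block ? 0xFFFFFFFFFFFFFFFFULL : (uint64_t)0U;
}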
(uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d10 = (uint32_t)3U; - uint64_t *wv_a0 = wv + a * (uint32_t)4U; - uint64_t *wv_b0 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + wv_b0[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a0; - uint64_t x1 = wv_a0[i] + x[i]; - os[i] = x1;); - uint64_t *wv_a1 = wv + d10 * (uint32_t)4U; - uint64_t *wv_b1 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a1; - uint64_t x1 = wv_a1[i] ^ wv_b1[i]; - os[i] = x1;); - uint64_t *r12 = wv_a1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r12; - uint64_t x1 = r12[i]; - uint64_t x10 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U; - os[i] = x10;); - uint64_t *wv_a2 = wv + c0 * (uint32_t)4U; - uint64_t *wv_b2 = wv + d10 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a2; - uint64_t x1 = wv_a2[i] + wv_b2[i]; - os[i] = x1;); - uint64_t *wv_a3 = wv + b0 * (uint32_t)4U; - uint64_t *wv_b3 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a3; - uint64_t x1 = wv_a3[i] ^ wv_b3[i]; - os[i] = x1;); - uint64_t *r13 = wv_a3; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r13; - uint64_t x1 = r13[i]; - uint64_t x10 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U; - os[i] = x10;); - uint64_t *wv_a4 = wv + a * (uint32_t)4U; - uint64_t *wv_b4 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + wv_b4[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a4; - uint64_t x1 = wv_a4[i] + y[i]; - os[i] = x1;); - uint64_t *wv_a5 = wv + d10 * (uint32_t)4U; - uint64_t *wv_b5 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a5; - uint64_t x1 = wv_a5[i] ^ wv_b5[i]; - os[i] = x1;); - uint64_t *r14 = wv_a5; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r14; - uint64_t x1 = r14[i]; - uint64_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U; - os[i] = x10;); - uint64_t *wv_a6 = wv + c0 * (uint32_t)4U; - uint64_t *wv_b6 = wv + d10 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a6; - uint64_t x1 = wv_a6[i] + wv_b6[i]; - os[i] = x1;); - uint64_t *wv_a7 = wv + b0 * (uint32_t)4U; - uint64_t *wv_b7 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a7; - uint64_t x1 = wv_a7[i] ^ wv_b7[i]; - os[i] = x1;); - uint64_t *r15 = wv_a7; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r15; - uint64_t x1 = r15[i]; - uint64_t x10 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U; - os[i] = x10;); - uint64_t *r16 = wv + (uint32_t)4U; - uint64_t *r22 = wv + (uint32_t)8U; - uint64_t *r32 = wv + (uint32_t)12U; - uint64_t *r110 = r16; - uint64_t x00 = r110[1U]; - uint64_t x10 = r110[2U]; - uint64_t x20 = r110[3U]; - uint64_t x30 = r110[0U]; - r110[0U] = x00; - r110[1U] = x10; - r110[2U] = x20; - r110[3U] = x30; - uint64_t *r111 = r22; - uint64_t x01 = r111[2U]; - uint64_t x11 = r111[3U]; - uint64_t x21 = r111[0U]; - 
uint64_t x31 = r111[1U]; - r111[0U] = x01; - r111[1U] = x11; - r111[2U] = x21; - r111[3U] = x31; - uint64_t *r112 = r32; - uint64_t x02 = r112[3U]; - uint64_t x12 = r112[0U]; - uint64_t x22 = r112[1U]; - uint64_t x32 = r112[2U]; - r112[0U] = x02; - r112[1U] = x12; - r112[2U] = x22; - r112[3U] = x32; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d1 = (uint32_t)3U; - uint64_t *wv_a = wv + a0 * (uint32_t)4U; - uint64_t *wv_b8 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + wv_b8[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a; - uint64_t x1 = wv_a[i] + z[i]; - os[i] = x1;); - uint64_t *wv_a8 = wv + d1 * (uint32_t)4U; - uint64_t *wv_b9 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a8; - uint64_t x1 = wv_a8[i] ^ wv_b9[i]; - os[i] = x1;); - uint64_t *r17 = wv_a8; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r17; - uint64_t x1 = r17[i]; - uint64_t x13 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U; - os[i] = x13;); - uint64_t *wv_a9 = wv + c * (uint32_t)4U; - uint64_t *wv_b10 = wv + d1 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a9; - uint64_t x1 = wv_a9[i] + wv_b10[i]; - os[i] = x1;); - uint64_t *wv_a10 = wv + b * (uint32_t)4U; - uint64_t *wv_b11 = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a10; - uint64_t x1 = wv_a10[i] ^ wv_b11[i]; - os[i] = x1;); - uint64_t *r18 = wv_a10; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r18; - uint64_t x1 = r18[i]; - uint64_t x13 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U; - os[i] = x13;); - uint64_t *wv_a11 = wv + a0 * (uint32_t)4U; - uint64_t *wv_b12 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a11; - uint64_t x1 = wv_a11[i] + wv_b12[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a11; - uint64_t x1 = wv_a11[i] + w[i]; - os[i] = x1;); - uint64_t *wv_a12 = wv + d1 * (uint32_t)4U; - uint64_t *wv_b13 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a12; - uint64_t x1 = wv_a12[i] ^ wv_b13[i]; - os[i] = x1;); - uint64_t *r19 = wv_a12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r19; - uint64_t x1 = r19[i]; - uint64_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U; - os[i] = x13;); - uint64_t *wv_a13 = wv + c * (uint32_t)4U; - uint64_t *wv_b14 = wv + d1 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a13; - uint64_t x1 = wv_a13[i] + wv_b14[i]; - os[i] = x1;); - uint64_t *wv_a14 = wv + b * (uint32_t)4U; - uint64_t *wv_b = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = wv_a14; - uint64_t x1 = wv_a14[i] ^ wv_b[i]; - os[i] = x1;); - uint64_t *r113 = wv_a14; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = r113; - uint64_t x1 = r113[i]; - uint64_t x13 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U; - os[i] = x13;); - uint64_t *r114 = wv + (uint32_t)4U; - uint64_t *r2 = wv + (uint32_t)8U; - uint64_t *r3 = wv 
+ (uint32_t)12U; - uint64_t *r11 = r114; - uint64_t x03 = r11[3U]; - uint64_t x13 = r11[0U]; - uint64_t x23 = r11[1U]; - uint64_t x33 = r11[2U]; - r11[0U] = x03; - r11[1U] = x13; - r11[2U] = x23; - r11[3U] = x33; - uint64_t *r115 = r2; - uint64_t x04 = r115[2U]; - uint64_t x14 = r115[3U]; - uint64_t x24 = r115[0U]; - uint64_t x34 = r115[1U]; - r115[0U] = x04; - r115[1U] = x14; - r115[2U] = x24; - r115[3U] = x34; - uint64_t *r116 = r3; - uint64_t x0 = r116[1U]; - uint64_t x1 = r116[2U]; - uint64_t x2 = r116[3U]; - uint64_t x3 = r116[0U]; - r116[0U] = x0; - r116[1U] = x1; - r116[2U] = x2; - r116[3U] = x3;); - s00 = hash; - s16 = hash + (uint32_t)4U; - r00 = wv; - r10 = wv + (uint32_t)4U; - r20 = wv + (uint32_t)8U; - r30 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s00; - uint64_t x = s00[i] ^ r00[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s00; - uint64_t x = s00[i] ^ r20[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s16; - uint64_t x = s16[i] ^ r10[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = s16; - uint64_t x = s16[i] ^ r30[i]; - os[i] = x;); - } - } -} - -void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn) -{ - uint64_t *r0 = hash; - uint64_t *r1 = hash + (uint32_t)4U; - uint64_t *r2 = hash + (uint32_t)8U; - uint64_t *r3 = hash + (uint32_t)12U; - uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U]; - uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U]; - uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U]; - uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U]; - uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U]; - uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U]; - uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U]; - uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U]; - uint64_t kk_shift_8; - uint64_t iv0_; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = (uint64_t)kk << (uint32_t)8U; - iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; -} - -void -Hacl_Blake2b_32_blake2b_update_key( - uint64_t *wv, - uint64_t *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll -) -{ - FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U); - uint8_t b[128U] = { 0U }; - memcpy(b, k, kk * sizeof (uint8_t)); - if (ll == (uint32_t)0U) - { - blake2b_update_block(wv, hash, true, lb, b); - } - else - { - blake2b_update_block(wv, hash, false, lb, b); - } - Lib_Memzero0_memzero(b, (uint32_t)128U * sizeof (b[0U])); -} - -void -Hacl_Blake2b_32_blake2b_update_multi( - uint32_t len, - uint64_t *wv, - uint64_t *hash, - FStar_UInt128_uint128 prev, - uint8_t *blocks, - uint32_t nb -) -{ - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - FStar_UInt128_uint128 - totlen = - FStar_UInt128_add_mod(prev, - FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U))); - uint8_t *b = blocks + i * (uint32_t)128U; - blake2b_update_block(wv, hash, false, totlen, b); - } -} - -void -Hacl_Blake2b_32_blake2b_update_last( - uint32_t len, - uint64_t *wv, - uint64_t *hash, - FStar_UInt128_uint128 prev, - uint32_t rem, - uint8_t *d -) -{ - 
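- /* Final block: copy the rem trailing bytes of d into a zero-padded - 128-byte buffer, add the total input length into the 128-bit counter, - and compress once with the last-block flag set (inverting word 14 of wv). */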
uint8_t b[128U] = { 0U }; - uint8_t *last = d + len - rem; - FStar_UInt128_uint128 totlen; - memcpy(b, last, rem * sizeof (uint8_t)); - totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); - blake2b_update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, (uint32_t)128U * sizeof (b[0U])); -} - -static void -blake2b_update_blocks( - uint32_t len, - uint64_t *wv, - uint64_t *hash, - FStar_UInt128_uint128 prev, - uint8_t *blocks -) -{ - uint32_t nb0 = len / (uint32_t)128U; - uint32_t rem0 = len % (uint32_t)128U; - K___uint32_t_uint32_t scrut; - if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U) - { - uint32_t nb_ = nb0 - (uint32_t)1U; - uint32_t rem_ = (uint32_t)128U; - K___uint32_t_uint32_t lit; - lit.fst = nb_; - lit.snd = rem_; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = nb0; - lit.snd = rem0; - scrut = lit; - } - { - uint32_t nb = scrut.fst; - uint32_t rem = scrut.snd; - Hacl_Blake2b_32_blake2b_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Blake2b_32_blake2b_update_last(len, wv, hash, prev, rem, blocks); - } -} - -static inline void -blake2b_update(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d) -{ - FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U); - if (kk > (uint32_t)0U) - { - Hacl_Blake2b_32_blake2b_update_key(wv, hash, kk, k, ll); - if (!(ll == (uint32_t)0U)) - { - blake2b_update_blocks(ll, wv, hash, lb, d); - return; - } - return; - } - blake2b_update_blocks(ll, - wv, - hash, - FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U), - d); -} - -void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash) -{ - uint32_t double_row = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + (uint32_t)32U; - uint64_t *row0 = hash; - uint64_t *row1 = hash + (uint32_t)4U; - uint8_t *final; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(first + i * (uint32_t)8U, row0[i]);); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_le(second + i * (uint32_t)8U, row1[i]);); - final = b; - memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -void -Hacl_Blake2b_32_blake2b( - uint32_t nn, - uint8_t *output, - uint32_t ll, - uint8_t *d, - uint32_t kk, - uint8_t *k -) -{ - uint64_t b[16U] = { 0U }; - uint64_t b1[16U] = { 0U }; - Hacl_Blake2b_32_blake2b_init(b, kk, nn); - blake2b_update(b1, b, kk, k, ll, d); - Hacl_Blake2b_32_blake2b_finish(nn, output, b); - Lib_Memzero0_memzero(b1, (uint32_t)16U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)16U * sizeof (b[0U])); -} - -static inline void -blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t *d) -{ - uint32_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = m_w; - uint8_t *bj = d + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - uint32_t mask[4U] = { 0U }; - uint32_t wv_14; - if (flag) - { - wv_14 = (uint32_t)0xFFFFFFFFU; - } - else - { - wv_14 = (uint32_t)0U; - } - { - uint32_t wv_15 = (uint32_t)0U; - uint32_t *wv3; - uint32_t *s00; - uint32_t *s16; - uint32_t *r00; - uint32_t *r10; - uint32_t *r20; - uint32_t *r30; - mask[0U] = (uint32_t)totlen; - mask[1U] = (uint32_t)(totlen >> 
(uint32_t)32U); - mask[2U] = wv_14; - mask[3U] = wv_15; - memcpy(wv, hash, (uint32_t)16U * sizeof (uint32_t)); - wv3 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv3; - uint32_t x = wv3[i] ^ mask[i]; - os[i] = x;); - KRML_MAYBE_FOR10(i0, - (uint32_t)0U, - (uint32_t)10U, - (uint32_t)1U, - uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U; - uint32_t m_st[16U] = { 0U }; - uint32_t *r0 = m_st; - uint32_t *r1 = m_st + (uint32_t)4U; - uint32_t *r21 = m_st + (uint32_t)8U; - uint32_t *r31 = m_st + (uint32_t)12U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - uint32_t uu____0 = m_w[s2]; - uint32_t uu____1 = m_w[s4]; - uint32_t uu____2 = m_w[s6]; - r0[0U] = m_w[s0]; - r0[1U] = uu____0; - r0[2U] = uu____1; - r0[3U] = uu____2; - uint32_t uu____3 = m_w[s3]; - uint32_t uu____4 = m_w[s5]; - uint32_t uu____5 = m_w[s7]; - r1[0U] = m_w[s1]; - r1[1U] = uu____3; - r1[2U] = uu____4; - r1[3U] = uu____5; - uint32_t uu____6 = m_w[s10]; - uint32_t uu____7 = m_w[s12]; - uint32_t uu____8 = m_w[s14]; - r21[0U] = m_w[s8]; - r21[1U] = uu____6; - r21[2U] = uu____7; - r21[3U] = uu____8; - uint32_t uu____9 = m_w[s11]; - uint32_t uu____10 = m_w[s13]; - uint32_t uu____11 = m_w[s15]; - r31[0U] = m_w[s9]; - r31[1U] = uu____9; - r31[2U] = uu____10; - r31[3U] = uu____11; - uint32_t *x = m_st; - uint32_t *y = m_st + (uint32_t)4U; - uint32_t *z = m_st + (uint32_t)8U; - uint32_t *w = m_st + (uint32_t)12U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d10 = (uint32_t)3U; - uint32_t *wv_a0 = wv + a * (uint32_t)4U; - uint32_t *wv_b0 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + wv_b0[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a0; - uint32_t x1 = wv_a0[i] + x[i]; - os[i] = x1;); - uint32_t *wv_a1 = wv + d10 * (uint32_t)4U; - uint32_t *wv_b1 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a1; - uint32_t x1 = wv_a1[i] ^ wv_b1[i]; - os[i] = x1;); - uint32_t *r12 = wv_a1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t 
*os = r12; - uint32_t x1 = r12[i]; - uint32_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U; - os[i] = x10;); - uint32_t *wv_a2 = wv + c0 * (uint32_t)4U; - uint32_t *wv_b2 = wv + d10 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a2; - uint32_t x1 = wv_a2[i] + wv_b2[i]; - os[i] = x1;); - uint32_t *wv_a3 = wv + b0 * (uint32_t)4U; - uint32_t *wv_b3 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a3; - uint32_t x1 = wv_a3[i] ^ wv_b3[i]; - os[i] = x1;); - uint32_t *r13 = wv_a3; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r13; - uint32_t x1 = r13[i]; - uint32_t x10 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U; - os[i] = x10;); - uint32_t *wv_a4 = wv + a * (uint32_t)4U; - uint32_t *wv_b4 = wv + b0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + wv_b4[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a4; - uint32_t x1 = wv_a4[i] + y[i]; - os[i] = x1;); - uint32_t *wv_a5 = wv + d10 * (uint32_t)4U; - uint32_t *wv_b5 = wv + a * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a5; - uint32_t x1 = wv_a5[i] ^ wv_b5[i]; - os[i] = x1;); - uint32_t *r14 = wv_a5; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r14; - uint32_t x1 = r14[i]; - uint32_t x10 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U; - os[i] = x10;); - uint32_t *wv_a6 = wv + c0 * (uint32_t)4U; - uint32_t *wv_b6 = wv + d10 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a6; - uint32_t x1 = wv_a6[i] + wv_b6[i]; - os[i] = x1;); - uint32_t *wv_a7 = wv + b0 * (uint32_t)4U; - uint32_t *wv_b7 = wv + c0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a7; - uint32_t x1 = wv_a7[i] ^ wv_b7[i]; - os[i] = x1;); - uint32_t *r15 = wv_a7; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r15; - uint32_t x1 = r15[i]; - uint32_t x10 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U; - os[i] = x10;); - uint32_t *r16 = wv + (uint32_t)4U; - uint32_t *r22 = wv + (uint32_t)8U; - uint32_t *r32 = wv + (uint32_t)12U; - uint32_t *r110 = r16; - uint32_t x00 = r110[1U]; - uint32_t x10 = r110[2U]; - uint32_t x20 = r110[3U]; - uint32_t x30 = r110[0U]; - r110[0U] = x00; - r110[1U] = x10; - r110[2U] = x20; - r110[3U] = x30; - uint32_t *r111 = r22; - uint32_t x01 = r111[2U]; - uint32_t x11 = r111[3U]; - uint32_t x21 = r111[0U]; - uint32_t x31 = r111[1U]; - r111[0U] = x01; - r111[1U] = x11; - r111[2U] = x21; - r111[3U] = x31; - uint32_t *r112 = r32; - uint32_t x02 = r112[3U]; - uint32_t x12 = r112[0U]; - uint32_t x22 = r112[1U]; - uint32_t x32 = r112[2U]; - r112[0U] = x02; - r112[1U] = x12; - r112[2U] = x22; - r112[3U] = x32; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d1 = (uint32_t)3U; - uint32_t *wv_a = wv + a0 * (uint32_t)4U; - uint32_t *wv_b8 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + wv_b8[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a; - uint32_t x1 = wv_a[i] + z[i]; - os[i] = x1;); - uint32_t *wv_a8 = wv + d1 * 
(uint32_t)4U; - uint32_t *wv_b9 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a8; - uint32_t x1 = wv_a8[i] ^ wv_b9[i]; - os[i] = x1;); - uint32_t *r17 = wv_a8; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r17; - uint32_t x1 = r17[i]; - uint32_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U; - os[i] = x13;); - uint32_t *wv_a9 = wv + c * (uint32_t)4U; - uint32_t *wv_b10 = wv + d1 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a9; - uint32_t x1 = wv_a9[i] + wv_b10[i]; - os[i] = x1;); - uint32_t *wv_a10 = wv + b * (uint32_t)4U; - uint32_t *wv_b11 = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a10; - uint32_t x1 = wv_a10[i] ^ wv_b11[i]; - os[i] = x1;); - uint32_t *r18 = wv_a10; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r18; - uint32_t x1 = r18[i]; - uint32_t x13 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U; - os[i] = x13;); - uint32_t *wv_a11 = wv + a0 * (uint32_t)4U; - uint32_t *wv_b12 = wv + b * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a11; - uint32_t x1 = wv_a11[i] + wv_b12[i]; - os[i] = x1;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a11; - uint32_t x1 = wv_a11[i] + w[i]; - os[i] = x1;); - uint32_t *wv_a12 = wv + d1 * (uint32_t)4U; - uint32_t *wv_b13 = wv + a0 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a12; - uint32_t x1 = wv_a12[i] ^ wv_b13[i]; - os[i] = x1;); - uint32_t *r19 = wv_a12; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r19; - uint32_t x1 = r19[i]; - uint32_t x13 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U; - os[i] = x13;); - uint32_t *wv_a13 = wv + c * (uint32_t)4U; - uint32_t *wv_b14 = wv + d1 * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a13; - uint32_t x1 = wv_a13[i] + wv_b14[i]; - os[i] = x1;); - uint32_t *wv_a14 = wv + b * (uint32_t)4U; - uint32_t *wv_b = wv + c * (uint32_t)4U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = wv_a14; - uint32_t x1 = wv_a14[i] ^ wv_b[i]; - os[i] = x1;); - uint32_t *r113 = wv_a14; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = r113; - uint32_t x1 = r113[i]; - uint32_t x13 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U; - os[i] = x13;); - uint32_t *r114 = wv + (uint32_t)4U; - uint32_t *r2 = wv + (uint32_t)8U; - uint32_t *r3 = wv + (uint32_t)12U; - uint32_t *r11 = r114; - uint32_t x03 = r11[3U]; - uint32_t x13 = r11[0U]; - uint32_t x23 = r11[1U]; - uint32_t x33 = r11[2U]; - r11[0U] = x03; - r11[1U] = x13; - r11[2U] = x23; - r11[3U] = x33; - uint32_t *r115 = r2; - uint32_t x04 = r115[2U]; - uint32_t x14 = r115[3U]; - uint32_t x24 = r115[0U]; - uint32_t x34 = r115[1U]; - r115[0U] = x04; - r115[1U] = x14; - r115[2U] = x24; - r115[3U] = x34; - uint32_t *r116 = r3; - uint32_t x0 = r116[1U]; - uint32_t x1 = r116[2U]; - uint32_t x2 = r116[3U]; - uint32_t x3 = r116[0U]; - r116[0U] = x0; - r116[1U] = x1; - r116[2U] = x2; - r116[3U] = x3;); - s00 = hash; - s16 = hash + (uint32_t)4U; - r00 = wv; - r10 = wv + (uint32_t)4U; - r20 = wv + (uint32_t)8U; - r30 = wv + (uint32_t)12U; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - 
uint32_t *os = s00; - uint32_t x = s00[i] ^ r00[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s00; - uint32_t x = s00[i] ^ r20[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s16; - uint32_t x = s16[i] ^ r10[i]; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = s16; - uint32_t x = s16[i] ^ r30[i]; - os[i] = x;); - } - } -} - -void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn) -{ - uint32_t *r0 = hash; - uint32_t *r1 = hash + (uint32_t)4U; - uint32_t *r2 = hash + (uint32_t)8U; - uint32_t *r3 = hash + (uint32_t)12U; - uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U]; - uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U]; - uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U]; - uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U]; - uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U]; - uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U]; - uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U]; - uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U]; - uint32_t kk_shift_8; - uint32_t iv0_; - r2[0U] = iv0; - r2[1U] = iv1; - r2[2U] = iv2; - r2[3U] = iv3; - r3[0U] = iv4; - r3[1U] = iv5; - r3[2U] = iv6; - r3[3U] = iv7; - kk_shift_8 = kk << (uint32_t)8U; - iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn)); - r0[0U] = iv0_; - r0[1U] = iv1; - r0[2U] = iv2; - r0[3U] = iv3; - r1[0U] = iv4; - r1[1U] = iv5; - r1[2U] = iv6; - r1[3U] = iv7; -} - -void -Hacl_Blake2s_32_blake2s_update_key( - uint32_t *wv, - uint32_t *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll -) -{ - uint64_t lb = (uint64_t)(uint32_t)64U; - uint8_t b[64U] = { 0U }; - memcpy(b, k, kk * sizeof (uint8_t)); - if (ll == (uint32_t)0U) - { - blake2s_update_block(wv, hash, true, lb, b); - } - else - { - blake2s_update_block(wv, hash, false, lb, b); - } - Lib_Memzero0_memzero(b, (uint32_t)64U * sizeof (b[0U])); -} - -void -Hacl_Blake2s_32_blake2s_update_multi( - uint32_t len, - uint32_t *wv, - uint32_t *hash, - uint64_t prev, - uint8_t *blocks, - uint32_t nb -) -{ - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U); - uint8_t *b = blocks + i * (uint32_t)64U; - blake2s_update_block(wv, hash, false, totlen, b); - } -} - -void -Hacl_Blake2s_32_blake2s_update_last( - uint32_t len, - uint32_t *wv, - uint32_t *hash, - uint64_t prev, - uint32_t rem, - uint8_t *d -) -{ - uint8_t b[64U] = { 0U }; - uint8_t *last = d + len - rem; - uint64_t totlen; - memcpy(b, last, rem * sizeof (uint8_t)); - totlen = prev + (uint64_t)len; - blake2s_update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, (uint32_t)64U * sizeof (b[0U])); -} - -static void -blake2s_update_blocks( - uint32_t len, - uint32_t *wv, - uint32_t *hash, - uint64_t prev, - uint8_t *blocks -) -{ - uint32_t nb0 = len / (uint32_t)64U; - uint32_t rem0 = len % (uint32_t)64U; - K___uint32_t_uint32_t scrut; - if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U) - { - uint32_t nb_ = nb0 - (uint32_t)1U; - uint32_t rem_ = (uint32_t)64U; - K___uint32_t_uint32_t lit; - lit.fst = nb_; - lit.snd = rem_; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = nb0; - lit.snd = rem0; - scrut = lit; - } - { - uint32_t nb = scrut.fst; - uint32_t rem = scrut.snd; - Hacl_Blake2s_32_blake2s_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Blake2s_32_blake2s_update_last(len, wv, hash, prev, 
rem, blocks); - } -} - -static inline void -blake2s_update(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d) -{ - uint64_t lb = (uint64_t)(uint32_t)64U; - if (kk > (uint32_t)0U) - { - Hacl_Blake2s_32_blake2s_update_key(wv, hash, kk, k, ll); - if (!(ll == (uint32_t)0U)) - { - blake2s_update_blocks(ll, wv, hash, lb, d); - return; - } - return; - } - blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d); -} - -void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash) -{ - uint32_t double_row = (uint32_t)32U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + (uint32_t)16U; - uint32_t *row0 = hash; - uint32_t *row1 = hash + (uint32_t)4U; - uint8_t *final; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store32_le(first + i * (uint32_t)4U, row0[i]);); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store32_le(second + i * (uint32_t)4U, row1[i]);); - final = b; - memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -void -Hacl_Blake2s_32_blake2s( - uint32_t nn, - uint8_t *output, - uint32_t ll, - uint8_t *d, - uint32_t kk, - uint8_t *k -) -{ - uint32_t b[16U] = { 0U }; - uint32_t b1[16U] = { 0U }; - Hacl_Blake2s_32_blake2s_init(b, kk, nn); - blake2s_update(b1, b, kk, k, ll, d); - Hacl_Blake2s_32_blake2s_finish(nn, output, b); - Lib_Memzero0_memzero(b1, (uint32_t)16U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)16U * sizeof (b[0U])); -} - diff --git a/dist/c89-compatible/Hacl_Hash_Blake2.h b/dist/c89-compatible/Hacl_Hash_Blake2.h deleted file mode 100644 index 4a76f46c16..0000000000 --- a/dist/c89-compatible/Hacl_Hash_Blake2.h +++ /dev/null @@ -1,139 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Hash_Blake2_H -#define __Hacl_Hash_Blake2_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Impl_Blake2_Constants.h" -#include "evercrypt_targetconfig.h" -#define Hacl_Impl_Blake2_Core_M32 0 -#define Hacl_Impl_Blake2_Core_M128 1 -#define Hacl_Impl_Blake2_Core_M256 2 - -typedef uint8_t Hacl_Impl_Blake2_Core_m_spec; - -void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn); - -void -Hacl_Blake2b_32_blake2b_update_key( - uint64_t *wv, - uint64_t *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll -); - -void -Hacl_Blake2b_32_blake2b_update_multi( - uint32_t len, - uint64_t *wv, - uint64_t *hash, - FStar_UInt128_uint128 prev, - uint8_t *blocks, - uint32_t nb -); - -void -Hacl_Blake2b_32_blake2b_update_last( - uint32_t len, - uint64_t *wv, - uint64_t *hash, - FStar_UInt128_uint128 prev, - uint32_t rem, - uint8_t *d -); - -void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash); - -void -Hacl_Blake2b_32_blake2b( - uint32_t nn, - uint8_t *output, - uint32_t ll, - uint8_t *d, - uint32_t kk, - uint8_t *k -); - -void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn); - -void -Hacl_Blake2s_32_blake2s_update_key( - uint32_t *wv, - uint32_t *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll -); - -void -Hacl_Blake2s_32_blake2s_update_multi( - uint32_t len, - uint32_t *wv, - uint32_t *hash, - uint64_t prev, - uint8_t *blocks, - uint32_t nb -); - -void -Hacl_Blake2s_32_blake2s_update_last( - uint32_t len, - uint32_t *wv, - uint32_t *hash, - uint64_t prev, - uint32_t rem, - uint8_t *d -); - -void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash); - -void -Hacl_Blake2s_32_blake2s( - uint32_t nn, - uint8_t *output, - uint32_t ll, - uint8_t *d, - uint32_t kk, - uint8_t *k -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Hash_Blake2_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Hash_Blake2b_256.c b/dist/c89-compatible/Hacl_Hash_Blake2b_256.c deleted file mode 100644 index 4147d63a6a..0000000000 --- a/dist/c89-compatible/Hacl_Hash_Blake2b_256.c +++ /dev/null @@ -1,1039 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#include "internal/Hacl_Hash_Blake2b_256.h" - -#include "internal/Hacl_Hash_Blake2.h" - -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_init_blake2b_256(Lib_IntVector_Intrinsics_vec256 *s) -{ - Lib_IntVector_Intrinsics_vec256 *r0 = s; - Lib_IntVector_Intrinsics_vec256 *r1 = s + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = s + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = s + (uint32_t)3U; - uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U]; - uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U]; - uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U]; - uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U]; - uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U]; - uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U]; - uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U]; - uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U]; - uint64_t kk_shift_8; - uint64_t iv0_; - r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - kk_shift_8 = (uint64_t)(uint32_t)0U << (uint32_t)8U; - iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)(uint32_t)64U)); - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - return FStar_UInt128_uint64_to_uint128((uint64_t)0U); -} - -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_update_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 totlen, - uint8_t *block -) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U }; - FStar_UInt128_uint128 - totlen1 = - FStar_UInt128_add_mod(totlen, - FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)); - uint64_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t *os = m_w; - uint8_t *bj = block + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero; - uint64_t wv_14 = (uint64_t)0U; - uint64_t wv_15 = (uint64_t)0U; - Lib_IntVector_Intrinsics_vec256 *wv3; - Lib_IntVector_Intrinsics_vec256 *s00; - Lib_IntVector_Intrinsics_vec256 *s16; - Lib_IntVector_Intrinsics_vec256 *r00; - Lib_IntVector_Intrinsics_vec256 *r10; - Lib_IntVector_Intrinsics_vec256 *r20; - Lib_IntVector_Intrinsics_vec256 *r30; - mask = - Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen1), - FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen1, (uint32_t)64U)), - wv_14, - wv_15); - memcpy(wv, s, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - wv3 = wv + (uint32_t)3U; - wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 *r0 = m_st; - Lib_IntVector_Intrinsics_vec256 *r1 = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r21 = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r31 = m_st + (uint32_t)3U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = 
Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); - r21[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); - r31[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]); - Lib_IntVector_Intrinsics_vec256 *x = m_st; - Lib_IntVector_Intrinsics_vec256 *y = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *z = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *w = m_st + (uint32_t)3U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * (uint32_t)1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * (uint32_t)1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d0 * (uint32_t)1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * (uint32_t)1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], (uint32_t)24U); - Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * (uint32_t)1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * (uint32_t)1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d0 * (uint32_t)1U; - wv_a6[0U] = 
Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * (uint32_t)1U; - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], (uint32_t)63U); - Lib_IntVector_Intrinsics_vec256 *r11 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r22 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r32 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 v00 = r11[0U]; - Lib_IntVector_Intrinsics_vec256 - v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, (uint32_t)1U); - r11[0U] = v1; - Lib_IntVector_Intrinsics_vec256 v01 = r22[0U]; - Lib_IntVector_Intrinsics_vec256 - v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, (uint32_t)2U); - r22[0U] = v10; - Lib_IntVector_Intrinsics_vec256 v02 = r32[0U]; - Lib_IntVector_Intrinsics_vec256 - v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, (uint32_t)3U); - r32[0U] = v11; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * (uint32_t)1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * (uint32_t)1U; - wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]); - wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d * (uint32_t)1U; - wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * (uint32_t)1U; - wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]); - wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], (uint32_t)24U); - Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * (uint32_t)1U; - wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]); - wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * (uint32_t)1U; - wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]); - wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d * (uint32_t)1U; - wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * (uint32_t)1U; - wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]); - wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], (uint32_t)63U); - Lib_IntVector_Intrinsics_vec256 *r12 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U; - 
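- /* Rotate the row lanes back into column order (row 1 by 3 lanes, - row 2 by 2, row 3 by 1), undoing the diagonalization above. */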
Lib_IntVector_Intrinsics_vec256 v0 = r12[0U]; - Lib_IntVector_Intrinsics_vec256 - v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, (uint32_t)3U); - r12[0U] = v12; - Lib_IntVector_Intrinsics_vec256 v03 = r2[0U]; - Lib_IntVector_Intrinsics_vec256 - v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, (uint32_t)2U); - r2[0U] = v13; - Lib_IntVector_Intrinsics_vec256 v04 = r3[0U]; - Lib_IntVector_Intrinsics_vec256 - v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, (uint32_t)1U); - r3[0U] = v14;); - s00 = s; - s16 = s + (uint32_t)1U; - r00 = wv; - r10 = wv + (uint32_t)1U; - r20 = wv + (uint32_t)2U; - r30 = wv + (uint32_t)3U; - s00[0U] = Lib_IntVector_Intrinsics_vec256_xor(s00[0U], r00[0U]); - s00[0U] = Lib_IntVector_Intrinsics_vec256_xor(s00[0U], r20[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec256_xor(s16[0U], r10[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec256_xor(s16[0U], r30[0U]); - return totlen1; - } -} - -void -Hacl_Hash_Blake2b_256_finish_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 ev, - uint8_t *dst -) -{ - uint32_t double_row = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + (uint32_t)32U; - Lib_IntVector_Intrinsics_vec256 *row0 = s; - Lib_IntVector_Intrinsics_vec256 *row1 = s + (uint32_t)1U; - uint8_t *final; - Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]); - Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]); - final = b; - memcpy(dst, final, (uint32_t)64U * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_update_multi_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 ev, - uint8_t *blocks, - uint32_t n_blocks -) -{ - { - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)128U; - uint8_t *block = blocks + sz * i; - FStar_UInt128_uint128 - v_ = - Hacl_Hash_Blake2b_256_update_blake2b_256(s, - FStar_UInt128_add_mod(ev, - FStar_UInt128_uint64_to_uint128((uint64_t)i * (uint64_t)(uint32_t)128U)), - block); - } - } - return - FStar_UInt128_add_mod(ev, - FStar_UInt128_uint64_to_uint128((uint64_t)n_blocks * (uint64_t)(uint32_t)128U)); -} - -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_update_last_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 ev, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)128U; - uint32_t blocks_len0 = blocks_n * (uint32_t)128U; - uint32_t rest_len0 = input_len - blocks_len0; - K___uint32_t_uint32_t_uint32_t scrut0; - if (rest_len0 == (uint32_t)0U && blocks_n > (uint32_t)0U) - { - uint32_t blocks_n1 = blocks_n - (uint32_t)1U; - uint32_t blocks_len1 = blocks_len0 - (uint32_t)128U; - uint32_t rest_len1 = (uint32_t)128U; - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n1; - lit.snd = blocks_len1; - lit.thd = rest_len1; - scrut0 = lit; - } - else - { - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n; - lit.snd = blocks_len0; - lit.thd = rest_len0; - scrut0 = lit; - } - { - uint32_t num_blocks0 = scrut0.fst; - uint32_t blocks_len = scrut0.snd; - uint32_t rest_len1 = scrut0.thd; - uint8_t *blocks0 = input; - uint8_t *rest0 = input + blocks_len; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ lit; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ scrut; - uint32_t 
num_blocks; - uint32_t rest_len; - uint8_t *blocks; - uint8_t *rest; - FStar_UInt128_uint128 ev_; - lit.fst = num_blocks0; - lit.snd = blocks_len; - lit.thd = rest_len1; - lit.f3 = blocks0; - lit.f4 = rest0; - scrut = lit; - num_blocks = scrut.fst; - rest_len = scrut.thd; - blocks = scrut.f3; - rest = scrut.f4; - ev_ = Hacl_Hash_Blake2b_256_update_multi_blake2b_256(s, ev, blocks, num_blocks); - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t tmp[128U] = { 0U }; - uint8_t *tmp_rest = tmp; - FStar_UInt128_uint128 totlen; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - totlen = FStar_UInt128_add_mod(ev_, FStar_UInt128_uint64_to_uint128((uint64_t)rest_len)); - { - uint64_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t *os = m_w; - uint8_t *bj = tmp + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero; - uint64_t wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t wv_15 = (uint64_t)0U; - Lib_IntVector_Intrinsics_vec256 *wv3; - Lib_IntVector_Intrinsics_vec256 *s00; - Lib_IntVector_Intrinsics_vec256 *s16; - Lib_IntVector_Intrinsics_vec256 *r00; - Lib_IntVector_Intrinsics_vec256 *r10; - Lib_IntVector_Intrinsics_vec256 *r20; - Lib_IntVector_Intrinsics_vec256 *r30; - mask = - Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen), - FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)), - wv_14, - wv_15); - memcpy(wv, s, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - wv3 = wv + (uint32_t)3U; - wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U; - KRML_PRE_ALIGN(32) - Lib_IntVector_Intrinsics_vec256 - m_st[4U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 *r0 = m_st; - Lib_IntVector_Intrinsics_vec256 *r1 = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r21 = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r31 = m_st + (uint32_t)3U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - r0[0U] 
= Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); - r21[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); - r31[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]); - Lib_IntVector_Intrinsics_vec256 *x = m_st; - Lib_IntVector_Intrinsics_vec256 *y = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *z = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *w = m_st + (uint32_t)3U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * (uint32_t)1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * (uint32_t)1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d0 * (uint32_t)1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * (uint32_t)1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], (uint32_t)24U); - Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * (uint32_t)1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * (uint32_t)1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d0 * (uint32_t)1U; - wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * (uint32_t)1U; - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], (uint32_t)63U); - Lib_IntVector_Intrinsics_vec256 *r11 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r22 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r32 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 v00 = r11[0U]; - Lib_IntVector_Intrinsics_vec256 - v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, (uint32_t)1U); - r11[0U] = v1; - Lib_IntVector_Intrinsics_vec256 v01 = r22[0U]; - Lib_IntVector_Intrinsics_vec256 - v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, (uint32_t)2U); - r22[0U] = v10; - Lib_IntVector_Intrinsics_vec256 v02 = r32[0U]; - Lib_IntVector_Intrinsics_vec256 - v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, (uint32_t)3U); - 
r32[0U] = v11; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * (uint32_t)1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * (uint32_t)1U; - wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]); - wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d * (uint32_t)1U; - wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * (uint32_t)1U; - wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]); - wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], (uint32_t)24U); - Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * (uint32_t)1U; - wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]); - wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * (uint32_t)1U; - wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]); - wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d * (uint32_t)1U; - wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * (uint32_t)1U; - wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]); - wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], (uint32_t)63U); - Lib_IntVector_Intrinsics_vec256 *r12 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 v0 = r12[0U]; - Lib_IntVector_Intrinsics_vec256 - v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, (uint32_t)3U); - r12[0U] = v12; - Lib_IntVector_Intrinsics_vec256 v03 = r2[0U]; - Lib_IntVector_Intrinsics_vec256 - v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, (uint32_t)2U); - r2[0U] = v13; - Lib_IntVector_Intrinsics_vec256 v04 = r3[0U]; - Lib_IntVector_Intrinsics_vec256 - v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, (uint32_t)1U); - r3[0U] = v14;); - s00 = s; - s16 = s + (uint32_t)1U; - r00 = wv; - r10 = wv + (uint32_t)1U; - r20 = wv + (uint32_t)2U; - r30 = wv + (uint32_t)3U; - s00[0U] = Lib_IntVector_Intrinsics_vec256_xor(s00[0U], r00[0U]); - s00[0U] = Lib_IntVector_Intrinsics_vec256_xor(s00[0U], r20[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec256_xor(s16[0U], r10[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec256_xor(s16[0U], r30[0U]); - return FStar_UInt128_uint64_to_uint128((uint64_t)0U); - } - } - } - } -} - -void Hacl_Hash_Blake2b_256_hash_blake2b_256(uint8_t 
*input, uint32_t input_len, uint8_t *dst) -{ - Hacl_Blake2b_256_blake2b((uint32_t)64U, dst, input_len, input, (uint32_t)0U, NULL); -} - -Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_256_malloc_blake2b_256() -{ - Lib_IntVector_Intrinsics_vec256 - *buf = - (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, - sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U); - memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - return buf; -} - -static inline void -blake2b_update_block( - Lib_IntVector_Intrinsics_vec256 *wv, - Lib_IntVector_Intrinsics_vec256 *hash, - bool flag, - FStar_UInt128_uint128 totlen, - uint8_t *d -) -{ - uint64_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t *os = m_w; - uint8_t *bj = d + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero; - uint64_t wv_14; - if (flag) - { - wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - else - { - wv_14 = (uint64_t)0U; - } - { - uint64_t wv_15 = (uint64_t)0U; - Lib_IntVector_Intrinsics_vec256 *wv3; - Lib_IntVector_Intrinsics_vec256 *s00; - Lib_IntVector_Intrinsics_vec256 *s16; - Lib_IntVector_Intrinsics_vec256 *r00; - Lib_IntVector_Intrinsics_vec256 *r10; - Lib_IntVector_Intrinsics_vec256 *r20; - Lib_IntVector_Intrinsics_vec256 *r30; - mask = - Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen), - FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)), - wv_14, - wv_15); - memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - wv3 = wv + (uint32_t)3U; - wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask); - KRML_MAYBE_FOR12(i, - (uint32_t)0U, - (uint32_t)12U, - (uint32_t)1U, - uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 *r0 = m_st; - Lib_IntVector_Intrinsics_vec256 *r1 = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r21 = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r31 = m_st + (uint32_t)3U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - r0[0U] 
= Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); - r21[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); - r31[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]); - Lib_IntVector_Intrinsics_vec256 *x = m_st; - Lib_IntVector_Intrinsics_vec256 *y = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *z = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *w = m_st + (uint32_t)3U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d10 = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * (uint32_t)1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * (uint32_t)1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * (uint32_t)1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * (uint32_t)1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], (uint32_t)24U); - Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * (uint32_t)1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * (uint32_t)1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * (uint32_t)1U; - wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * (uint32_t)1U; - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], (uint32_t)63U); - Lib_IntVector_Intrinsics_vec256 *r11 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r22 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r32 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 v00 = r11[0U]; - Lib_IntVector_Intrinsics_vec256 - v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, (uint32_t)1U); - r11[0U] = v1; - Lib_IntVector_Intrinsics_vec256 v01 = r22[0U]; - Lib_IntVector_Intrinsics_vec256 - v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, (uint32_t)2U); - r22[0U] = v10; - Lib_IntVector_Intrinsics_vec256 v02 = r32[0U]; - Lib_IntVector_Intrinsics_vec256 - v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, 
(uint32_t)3U); - r32[0U] = v11; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d1 = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * (uint32_t)1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * (uint32_t)1U; - wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]); - wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * (uint32_t)1U; - wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * (uint32_t)1U; - wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]); - wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], (uint32_t)24U); - Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * (uint32_t)1U; - wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]); - wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * (uint32_t)1U; - wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]); - wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * (uint32_t)1U; - wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * (uint32_t)1U; - wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]); - wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], (uint32_t)63U); - Lib_IntVector_Intrinsics_vec256 *r12 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec256 v0 = r12[0U]; - Lib_IntVector_Intrinsics_vec256 - v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, (uint32_t)3U); - r12[0U] = v12; - Lib_IntVector_Intrinsics_vec256 v03 = r2[0U]; - Lib_IntVector_Intrinsics_vec256 - v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, (uint32_t)2U); - r2[0U] = v13; - Lib_IntVector_Intrinsics_vec256 v04 = r3[0U]; - Lib_IntVector_Intrinsics_vec256 - v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, (uint32_t)1U); - r3[0U] = v14;); - s00 = hash; - s16 = hash + (uint32_t)1U; - r00 = wv; - r10 = wv + (uint32_t)1U; - r20 = wv + (uint32_t)2U; - r30 = wv + (uint32_t)3U; - s00[0U] = Lib_IntVector_Intrinsics_vec256_xor(s00[0U], r00[0U]); - s00[0U] = Lib_IntVector_Intrinsics_vec256_xor(s00[0U], r20[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec256_xor(s16[0U], r10[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec256_xor(s16[0U], r30[0U]); - } - } -} - -void -Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, 
uint32_t nn) -{ - Lib_IntVector_Intrinsics_vec256 *r0 = hash; - Lib_IntVector_Intrinsics_vec256 *r1 = hash + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = hash + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = hash + (uint32_t)3U; - uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U]; - uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U]; - uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U]; - uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U]; - uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U]; - uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U]; - uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U]; - uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U]; - uint64_t kk_shift_8; - uint64_t iv0_; - r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); - kk_shift_8 = (uint64_t)kk << (uint32_t)8U; - iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn)); - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7); -} - -void -Hacl_Blake2b_256_blake2b_update_key( - Lib_IntVector_Intrinsics_vec256 *wv, - Lib_IntVector_Intrinsics_vec256 *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll -) -{ - FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U); - uint8_t b[128U] = { 0U }; - memcpy(b, k, kk * sizeof (uint8_t)); - if (ll == (uint32_t)0U) - { - blake2b_update_block(wv, hash, true, lb, b); - } - else - { - blake2b_update_block(wv, hash, false, lb, b); - } - Lib_Memzero0_memzero(b, (uint32_t)128U * sizeof (b[0U])); -} - -void -Hacl_Blake2b_256_blake2b_update_multi( - uint32_t len, - Lib_IntVector_Intrinsics_vec256 *wv, - Lib_IntVector_Intrinsics_vec256 *hash, - FStar_UInt128_uint128 prev, - uint8_t *blocks, - uint32_t nb -) -{ - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - FStar_UInt128_uint128 - totlen = - FStar_UInt128_add_mod(prev, - FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U))); - uint8_t *b = blocks + i * (uint32_t)128U; - blake2b_update_block(wv, hash, false, totlen, b); - } -} - -void -Hacl_Blake2b_256_blake2b_update_last( - uint32_t len, - Lib_IntVector_Intrinsics_vec256 *wv, - Lib_IntVector_Intrinsics_vec256 *hash, - FStar_UInt128_uint128 prev, - uint32_t rem, - uint8_t *d -) -{ - uint8_t b[128U] = { 0U }; - uint8_t *last = d + len - rem; - FStar_UInt128_uint128 totlen; - memcpy(b, last, rem * sizeof (uint8_t)); - totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len)); - blake2b_update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, (uint32_t)128U * sizeof (b[0U])); -} - -static inline void -blake2b_update_blocks( - uint32_t len, - Lib_IntVector_Intrinsics_vec256 *wv, - Lib_IntVector_Intrinsics_vec256 *hash, - FStar_UInt128_uint128 prev, - uint8_t *blocks -) -{ - uint32_t nb0 = len / (uint32_t)128U; - uint32_t rem0 = len % (uint32_t)128U; - K___uint32_t_uint32_t scrut; - if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U) - { - uint32_t nb_ = nb0 - (uint32_t)1U; - uint32_t rem_ = (uint32_t)128U; - K___uint32_t_uint32_t lit; - lit.fst = nb_; - lit.snd = rem_; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = nb0; - lit.snd = rem0; - scrut = lit; - } - { - uint32_t nb = scrut.fst; - uint32_t rem = scrut.snd; - Hacl_Blake2b_256_blake2b_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Blake2b_256_blake2b_update_last(len, 
wv, hash, prev, rem, blocks); - } -} - -static inline void -blake2b_update( - Lib_IntVector_Intrinsics_vec256 *wv, - Lib_IntVector_Intrinsics_vec256 *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll, - uint8_t *d -) -{ - FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U); - if (kk > (uint32_t)0U) - { - Hacl_Blake2b_256_blake2b_update_key(wv, hash, kk, k, ll); - if (!(ll == (uint32_t)0U)) - { - blake2b_update_blocks(ll, wv, hash, lb, d); - return; - } - return; - } - blake2b_update_blocks(ll, - wv, - hash, - FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U), - d); -} - -void -Hacl_Blake2b_256_blake2b_finish( - uint32_t nn, - uint8_t *output, - Lib_IntVector_Intrinsics_vec256 *hash -) -{ - uint32_t double_row = (uint32_t)64U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + (uint32_t)32U; - Lib_IntVector_Intrinsics_vec256 *row0 = hash; - Lib_IntVector_Intrinsics_vec256 *row1 = hash + (uint32_t)1U; - uint8_t *final; - Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]); - Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]); - final = b; - memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -void -Hacl_Blake2b_256_blake2b( - uint32_t nn, - uint8_t *output, - uint32_t ll, - uint8_t *d, - uint32_t kk, - uint8_t *k -) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b1[4U] KRML_POST_ALIGN(32) = { 0U }; - Hacl_Blake2b_256_blake2b_init(b, kk, nn); - blake2b_update(b1, b, kk, k, ll, d); - Hacl_Blake2b_256_blake2b_finish(nn, output, b); - Lib_Memzero0_memzero(b1, (uint32_t)4U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)4U * sizeof (b[0U])); -} - -void -Hacl_Blake2b_256_load_state256b_from_state32( - Lib_IntVector_Intrinsics_vec256 *st, - uint64_t *st32 -) -{ - Lib_IntVector_Intrinsics_vec256 *r0 = st; - Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U; - uint64_t *b0 = st32; - uint64_t *b1 = st32 + (uint32_t)4U; - uint64_t *b2 = st32 + (uint32_t)8U; - uint64_t *b3 = st32 + (uint32_t)12U; - r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b0[0U], b0[1U], b0[2U], b0[3U]); - r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b1[0U], b1[1U], b1[2U], b1[3U]); - r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b2[0U], b2[1U], b2[2U], b2[3U]); - r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b3[0U], b3[1U], b3[2U], b3[3U]); -} - -void -Hacl_Blake2b_256_store_state256b_to_state32( - uint64_t *st32, - Lib_IntVector_Intrinsics_vec256 *st -) -{ - Lib_IntVector_Intrinsics_vec256 *r0 = st; - Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U; - uint64_t *b0 = st32; - uint64_t *b1 = st32 + (uint32_t)4U; - uint64_t *b2 = st32 + (uint32_t)8U; - uint64_t *b3 = st32 + (uint32_t)12U; - uint8_t b8[32U] = { 0U }; - Lib_IntVector_Intrinsics_vec256_store64_le(b8, r0[0U]); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = b0; - uint8_t *bj = b8 + i * (uint32_t)8U; - uint64_t u = load64_le(bj); - uint64_t r = u; - uint64_t x = r; - os[i] = x;); - { - uint8_t b80[32U] = { 0U }; - 
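The K___uint32_t_uint32_t pair computed by blake2b_update_blocks above encodes BLAKE2's last-block rule: the final block must be compressed with the finalization flag set, so an input whose length is a non-zero multiple of 128 bytes holds one full block back for update_last. A standalone sketch of just that split; the struct and function names are illustrative:

    #include <stdint.h>

    typedef struct { uint32_t nb; uint32_t rem; } blake2b_split;

    /* Mirrors the nb/rem computation in blake2b_update_blocks: nb full
       blocks go to update_multi, and the remaining rem bytes (possibly a
       complete 128-byte block) go to update_last. */
    static blake2b_split split_blocks_128(uint32_t len)
    {
      blake2b_split s;
      s.nb = len / (uint32_t)128U;
      s.rem = len % (uint32_t)128U;
      if (s.rem == (uint32_t)0U && s.nb > (uint32_t)0U)
      {
        s.nb = s.nb - (uint32_t)1U;   /* hold the final full block back */
        s.rem = (uint32_t)128U;       /* ...and hand it to update_last  */
      }
      return s;
    }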
Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = b1;
-      uint8_t *bj = b80 + i * (uint32_t)8U;
-      uint64_t u = load64_le(bj);
-      uint64_t r = u;
-      uint64_t x = r;
-      os[i] = x;);
-    {
-      uint8_t b81[32U] = { 0U };
-      Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]);
-      KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
-        uint64_t *os = b2;
-        uint8_t *bj = b81 + i * (uint32_t)8U;
-        uint64_t u = load64_le(bj);
-        uint64_t r = u;
-        uint64_t x = r;
-        os[i] = x;);
-      {
-        uint8_t b82[32U] = { 0U };
-        Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]);
-        KRML_MAYBE_FOR4(i,
-          (uint32_t)0U,
-          (uint32_t)4U,
-          (uint32_t)1U,
-          uint64_t *os = b3;
-          uint8_t *bj = b82 + i * (uint32_t)8U;
-          uint64_t u = load64_le(bj);
-          uint64_t r = u;
-          uint64_t x = r;
-          os[i] = x;);
-      }
-    }
-  }
-}
-
diff --git a/dist/c89-compatible/Hacl_Hash_Blake2b_256.h b/dist/c89-compatible/Hacl_Hash_Blake2b_256.h
deleted file mode 100644
index 30ba652aae..0000000000
--- a/dist/c89-compatible/Hacl_Hash_Blake2b_256.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
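Before the declarations that follow, one detail of the deleted implementation is worth spelling out: Hacl_Blake2b_256_blake2b_init (defined in the .c above) folds the BLAKE2b parameter block into the first IV word, packing fanout = 1, depth = 1, the key length kk and the digest length nn into the low bytes. A one-function sketch of that computation; the function name is illustrative:

    #include <stdint.h>

    /* Parameter word computed by blake2b_init: matches the expression
       iv0 ^ (0x01010000 ^ ((kk << 8) ^ nn)) in the deleted code. */
    static uint64_t blake2b_param_word(uint64_t iv0, uint32_t kk, uint32_t nn)
    {
      uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
      return iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
    }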
-
-
-#ifndef __Hacl_Hash_Blake2b_256_H
-#define __Hacl_Hash_Blake2b_256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Lib_Memzero0.h"
-#include "Hacl_Krmllib.h"
-#include "Hacl_Impl_Blake2_Constants.h"
-#include "evercrypt_targetconfig.h"
-#include "libintvector.h"
-void
-Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn);
-
-void
-Hacl_Blake2b_256_blake2b_update_key(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2b_256_blake2b_update_multi(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks,
-  uint32_t nb
-);
-
-void
-Hacl_Blake2b_256_blake2b_update_last(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint32_t rem,
-  uint8_t *d
-);
-
-void
-Hacl_Blake2b_256_blake2b_finish(
-  uint32_t nn,
-  uint8_t *output,
-  Lib_IntVector_Intrinsics_vec256 *hash
-);
-
-void
-Hacl_Blake2b_256_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
-void
-Hacl_Blake2b_256_load_state256b_from_state32(
-  Lib_IntVector_Intrinsics_vec256 *st,
-  uint64_t *st32
-);
-
-void
-Hacl_Blake2b_256_store_state256b_to_state32(
-  uint64_t *st32,
-  Lib_IntVector_Intrinsics_vec256 *st
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Hash_Blake2b_256_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Hash_Blake2s_128.c b/dist/c89-compatible/Hacl_Hash_Blake2s_128.c
deleted file mode 100644
index 47fe6d61bb..0000000000
--- a/dist/c89-compatible/Hacl_Hash_Blake2s_128.c
+++ /dev/null
@@ -1,1025 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
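In the BLAKE2s code that follows, the 64-bit byte counter totlen does not fit in a 32-bit lane, so update_blake2s_128 and blake2s_update_block split it across the first two lanes of a mask row, place the finalization flags in the last two, and XOR that row into working-vector row 3. A portable sketch of that mask row, under the same (t_lo, t_hi, f0, f1) reading; names are illustrative:

    #include <stdint.h>

    /* The four 32-bit lanes XORed into the fourth working row:
       (t_lo, t_hi, f0, f1); f0 is all-ones only for the final block,
       and f1 is unused by this sequential implementation. */
    static void blake2s_mask_row(uint32_t mask[4], uint64_t totlen, int last)
    {
      mask[0] = (uint32_t)totlen;                   /* t_lo */
      mask[1] = (uint32_t)(totlen >> 32);           /* t_hi */
      mask[2] = last ? (uint32_t)0xFFFFFFFFU : 0U;  /* f0   */
      mask[3] = (uint32_t)0U;                       /* f1   */
    }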
-
-
-#include "internal/Hacl_Hash_Blake2s_128.h"
-
-#include "internal/Hacl_Hash_Blake2.h"
-
-uint64_t Hacl_Hash_Blake2s_128_init_blake2s_128(Lib_IntVector_Intrinsics_vec128 *s)
-{
-  Lib_IntVector_Intrinsics_vec128 *r0 = s;
-  Lib_IntVector_Intrinsics_vec128 *r1 = s + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = s + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = s + (uint32_t)3U;
-  uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
-  uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
-  uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
-  uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U];
-  uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U];
-  uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U];
-  uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U];
-  uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U];
-  uint32_t kk_shift_8;
-  uint32_t iv0_;
-  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3);
-  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-  kk_shift_8 = (uint32_t)0U;
-  iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ (uint32_t)32U));
-  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3);
-  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-  return (uint64_t)0U;
-}
-
-uint64_t
-Hacl_Hash_Blake2s_128_update_blake2s_128(
-  Lib_IntVector_Intrinsics_vec128 *s,
-  uint64_t totlen,
-  uint8_t *block
-)
-{
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-  uint64_t totlen1 = totlen + (uint64_t)(uint32_t)64U;
-  uint32_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint32_t *os = m_w;
-    uint8_t *bj = block + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  {
-    Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero;
-    uint32_t wv_14 = (uint32_t)0U;
-    uint32_t wv_15 = (uint32_t)0U;
-    Lib_IntVector_Intrinsics_vec128 *wv3;
-    Lib_IntVector_Intrinsics_vec128 *s00;
-    Lib_IntVector_Intrinsics_vec128 *s16;
-    Lib_IntVector_Intrinsics_vec128 *r00;
-    Lib_IntVector_Intrinsics_vec128 *r10;
-    Lib_IntVector_Intrinsics_vec128 *r20;
-    Lib_IntVector_Intrinsics_vec128 *r30;
-    mask =
-      Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen1,
-        (uint32_t)(totlen1 >> (uint32_t)32U),
-        wv_14,
-        wv_15);
-    memcpy(wv, s, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-    wv3 = wv + (uint32_t)3U;
-    wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask);
-    KRML_MAYBE_FOR10(i,
-      (uint32_t)0U,
-      (uint32_t)10U,
-      (uint32_t)1U,
-      uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
-      KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U };
-      Lib_IntVector_Intrinsics_vec128 *r0 = m_st;
-      Lib_IntVector_Intrinsics_vec128 *r1 = m_st + (uint32_t)1U;
-      Lib_IntVector_Intrinsics_vec128 *r21 = m_st + (uint32_t)2U;
-      Lib_IntVector_Intrinsics_vec128 *r31 = m_st + (uint32_t)3U;
-      uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-      uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-      uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-      uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-      uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-      uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-      uint32_t s6 =
Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); - r21[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); - r31[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]); - Lib_IntVector_Intrinsics_vec128 *x = m_st; - Lib_IntVector_Intrinsics_vec128 *y = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *z = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *w = m_st + (uint32_t)3U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * (uint32_t)1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * (uint32_t)1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d0 * (uint32_t)1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * (uint32_t)1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], (uint32_t)12U); - Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * (uint32_t)1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * (uint32_t)1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], (uint32_t)8U); - Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d0 * (uint32_t)1U; - wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * (uint32_t)1U; - wv_a7[0U] = 
Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], (uint32_t)7U); - Lib_IntVector_Intrinsics_vec128 *r11 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r22 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r32 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 v00 = r11[0U]; - Lib_IntVector_Intrinsics_vec128 - v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, (uint32_t)1U); - r11[0U] = v1; - Lib_IntVector_Intrinsics_vec128 v01 = r22[0U]; - Lib_IntVector_Intrinsics_vec128 - v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, (uint32_t)2U); - r22[0U] = v10; - Lib_IntVector_Intrinsics_vec128 v02 = r32[0U]; - Lib_IntVector_Intrinsics_vec128 - v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, (uint32_t)3U); - r32[0U] = v11; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * (uint32_t)1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * (uint32_t)1U; - wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]); - wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d * (uint32_t)1U; - wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * (uint32_t)1U; - wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]); - wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], (uint32_t)12U); - Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * (uint32_t)1U; - wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]); - wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * (uint32_t)1U; - wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]); - wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], (uint32_t)8U); - Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d * (uint32_t)1U; - wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * (uint32_t)1U; - wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]); - wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], (uint32_t)7U); - Lib_IntVector_Intrinsics_vec128 *r12 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 v0 = r12[0U]; - Lib_IntVector_Intrinsics_vec128 - v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, (uint32_t)3U); - r12[0U] = v12; - Lib_IntVector_Intrinsics_vec128 v03 = 
r2[0U]; - Lib_IntVector_Intrinsics_vec128 - v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, (uint32_t)2U); - r2[0U] = v13; - Lib_IntVector_Intrinsics_vec128 v04 = r3[0U]; - Lib_IntVector_Intrinsics_vec128 - v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, (uint32_t)1U); - r3[0U] = v14;); - s00 = s; - s16 = s + (uint32_t)1U; - r00 = wv; - r10 = wv + (uint32_t)1U; - r20 = wv + (uint32_t)2U; - r30 = wv + (uint32_t)3U; - s00[0U] = Lib_IntVector_Intrinsics_vec128_xor(s00[0U], r00[0U]); - s00[0U] = Lib_IntVector_Intrinsics_vec128_xor(s00[0U], r20[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec128_xor(s16[0U], r10[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec128_xor(s16[0U], r30[0U]); - return totlen1; - } -} - -void -Hacl_Hash_Blake2s_128_finish_blake2s_128( - Lib_IntVector_Intrinsics_vec128 *s, - uint64_t ev, - uint8_t *dst -) -{ - uint32_t double_row = (uint32_t)32U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + (uint32_t)16U; - Lib_IntVector_Intrinsics_vec128 *row0 = s; - Lib_IntVector_Intrinsics_vec128 *row1 = s + (uint32_t)1U; - uint8_t *final; - Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]); - Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]); - final = b; - memcpy(dst, final, (uint32_t)32U * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -uint64_t -Hacl_Hash_Blake2s_128_update_multi_blake2s_128( - Lib_IntVector_Intrinsics_vec128 *s, - uint64_t ev, - uint8_t *blocks, - uint32_t n_blocks -) -{ - { - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)64U; - uint8_t *block = blocks + sz * i; - uint64_t - v_ = - Hacl_Hash_Blake2s_128_update_blake2s_128(s, - ev + (uint64_t)i * (uint64_t)(uint32_t)64U, - block); - } - } - return ev + (uint64_t)n_blocks * (uint64_t)(uint32_t)64U; -} - -uint64_t -Hacl_Hash_Blake2s_128_update_last_blake2s_128( - Lib_IntVector_Intrinsics_vec128 *s, - uint64_t ev, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)64U; - uint32_t blocks_len0 = blocks_n * (uint32_t)64U; - uint32_t rest_len0 = input_len - blocks_len0; - K___uint32_t_uint32_t_uint32_t scrut0; - if (rest_len0 == (uint32_t)0U && blocks_n > (uint32_t)0U) - { - uint32_t blocks_n1 = blocks_n - (uint32_t)1U; - uint32_t blocks_len1 = blocks_len0 - (uint32_t)64U; - uint32_t rest_len1 = (uint32_t)64U; - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n1; - lit.snd = blocks_len1; - lit.thd = rest_len1; - scrut0 = lit; - } - else - { - K___uint32_t_uint32_t_uint32_t lit; - lit.fst = blocks_n; - lit.snd = blocks_len0; - lit.thd = rest_len0; - scrut0 = lit; - } - { - uint32_t num_blocks0 = scrut0.fst; - uint32_t blocks_len = scrut0.snd; - uint32_t rest_len1 = scrut0.thd; - uint8_t *blocks0 = input; - uint8_t *rest0 = input + blocks_len; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ lit; - K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_ scrut; - uint32_t num_blocks; - uint32_t rest_len; - uint8_t *blocks; - uint8_t *rest; - uint64_t ev_; - lit.fst = num_blocks0; - lit.snd = blocks_len; - lit.thd = rest_len1; - lit.f3 = blocks0; - lit.f4 = rest0; - scrut = lit; - num_blocks = scrut.fst; - rest_len = scrut.thd; - blocks = scrut.f3; - rest = scrut.f4; - ev_ = Hacl_Hash_Blake2s_128_update_multi_blake2s_128(s, ev, blocks, num_blocks); - { - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 
wv[4U] KRML_POST_ALIGN(16) = { 0U }; - uint8_t tmp[64U] = { 0U }; - uint8_t *tmp_rest = tmp; - uint64_t totlen; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - totlen = ev_ + (uint64_t)rest_len; - { - uint32_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = m_w; - uint8_t *bj = tmp + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero; - uint32_t wv_14 = (uint32_t)0xFFFFFFFFU; - uint32_t wv_15 = (uint32_t)0U; - Lib_IntVector_Intrinsics_vec128 *wv3; - Lib_IntVector_Intrinsics_vec128 *s00; - Lib_IntVector_Intrinsics_vec128 *s16; - Lib_IntVector_Intrinsics_vec128 *r00; - Lib_IntVector_Intrinsics_vec128 *r10; - Lib_IntVector_Intrinsics_vec128 *r20; - Lib_IntVector_Intrinsics_vec128 *r30; - mask = - Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen, - (uint32_t)(totlen >> (uint32_t)32U), - wv_14, - wv_15); - memcpy(wv, s, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128)); - wv3 = wv + (uint32_t)3U; - wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask); - KRML_MAYBE_FOR10(i, - (uint32_t)0U, - (uint32_t)10U, - (uint32_t)1U, - uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U; - KRML_PRE_ALIGN(16) - Lib_IntVector_Intrinsics_vec128 - m_st[4U] KRML_POST_ALIGN(16) = { 0U }; - Lib_IntVector_Intrinsics_vec128 *r0 = m_st; - Lib_IntVector_Intrinsics_vec128 *r1 = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r21 = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r31 = m_st + (uint32_t)3U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); - r21[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); - r31[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]); - Lib_IntVector_Intrinsics_vec128 *x = m_st; - Lib_IntVector_Intrinsics_vec128 *y = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *z = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *w = m_st + (uint32_t)3U; - uint32_t a = 
(uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d0 = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * (uint32_t)1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * (uint32_t)1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d0 * (uint32_t)1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * (uint32_t)1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], (uint32_t)12U); - Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * (uint32_t)1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * (uint32_t)1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], (uint32_t)8U); - Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d0 * (uint32_t)1U; - wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * (uint32_t)1U; - wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], (uint32_t)7U); - Lib_IntVector_Intrinsics_vec128 *r11 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r22 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r32 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 v00 = r11[0U]; - Lib_IntVector_Intrinsics_vec128 - v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, (uint32_t)1U); - r11[0U] = v1; - Lib_IntVector_Intrinsics_vec128 v01 = r22[0U]; - Lib_IntVector_Intrinsics_vec128 - v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, (uint32_t)2U); - r22[0U] = v10; - Lib_IntVector_Intrinsics_vec128 v02 = r32[0U]; - Lib_IntVector_Intrinsics_vec128 - v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, (uint32_t)3U); - r32[0U] = v11; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * (uint32_t)1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * (uint32_t)1U; - wv_a8[0U] = 
Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]); - wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d * (uint32_t)1U; - wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * (uint32_t)1U; - wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]); - wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], (uint32_t)12U); - Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * (uint32_t)1U; - wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]); - wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * (uint32_t)1U; - wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]); - wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], (uint32_t)8U); - Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d * (uint32_t)1U; - wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * (uint32_t)1U; - wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]); - wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], (uint32_t)7U); - Lib_IntVector_Intrinsics_vec128 *r12 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 v0 = r12[0U]; - Lib_IntVector_Intrinsics_vec128 - v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, (uint32_t)3U); - r12[0U] = v12; - Lib_IntVector_Intrinsics_vec128 v03 = r2[0U]; - Lib_IntVector_Intrinsics_vec128 - v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, (uint32_t)2U); - r2[0U] = v13; - Lib_IntVector_Intrinsics_vec128 v04 = r3[0U]; - Lib_IntVector_Intrinsics_vec128 - v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, (uint32_t)1U); - r3[0U] = v14;); - s00 = s; - s16 = s + (uint32_t)1U; - r00 = wv; - r10 = wv + (uint32_t)1U; - r20 = wv + (uint32_t)2U; - r30 = wv + (uint32_t)3U; - s00[0U] = Lib_IntVector_Intrinsics_vec128_xor(s00[0U], r00[0U]); - s00[0U] = Lib_IntVector_Intrinsics_vec128_xor(s00[0U], r20[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec128_xor(s16[0U], r10[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec128_xor(s16[0U], r30[0U]); - return (uint64_t)0U; - } - } - } - } -} - -void Hacl_Hash_Blake2s_128_hash_blake2s_128(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - Hacl_Blake2s_128_blake2s((uint32_t)32U, dst, input_len, input, (uint32_t)0U, NULL); -} - -Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_128_malloc_blake2s_128() -{ - Lib_IntVector_Intrinsics_vec128 - *buf = - (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16, - sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U); - memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128)); - return buf; -} - -static inline void -blake2s_update_block( - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 
*hash, - bool flag, - uint64_t totlen, - uint8_t *d -) -{ - uint32_t m_w[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = m_w; - uint8_t *bj = d + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero; - uint32_t wv_14; - if (flag) - { - wv_14 = (uint32_t)0xFFFFFFFFU; - } - else - { - wv_14 = (uint32_t)0U; - } - { - uint32_t wv_15 = (uint32_t)0U; - Lib_IntVector_Intrinsics_vec128 *wv3; - Lib_IntVector_Intrinsics_vec128 *s00; - Lib_IntVector_Intrinsics_vec128 *s16; - Lib_IntVector_Intrinsics_vec128 *r00; - Lib_IntVector_Intrinsics_vec128 *r10; - Lib_IntVector_Intrinsics_vec128 *r20; - Lib_IntVector_Intrinsics_vec128 *r30; - mask = - Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen, - (uint32_t)(totlen >> (uint32_t)32U), - wv_14, - wv_15); - memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128)); - wv3 = wv + (uint32_t)3U; - wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask); - KRML_MAYBE_FOR10(i, - (uint32_t)0U, - (uint32_t)10U, - (uint32_t)1U, - uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U }; - Lib_IntVector_Intrinsics_vec128 *r0 = m_st; - Lib_IntVector_Intrinsics_vec128 *r1 = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r21 = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r31 = m_st + (uint32_t)3U; - uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U]; - uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U]; - uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U]; - uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U]; - uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U]; - uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U]; - uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U]; - uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U]; - uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U]; - uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U]; - uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U]; - uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U]; - uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U]; - uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U]; - uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U]; - uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U]; - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]); - r21[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]); - r31[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]); - Lib_IntVector_Intrinsics_vec128 *x = m_st; - Lib_IntVector_Intrinsics_vec128 *y = m_st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *z = m_st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *w = m_st + (uint32_t)3U; - uint32_t a = (uint32_t)0U; - uint32_t b0 = (uint32_t)1U; - uint32_t c0 = (uint32_t)2U; - uint32_t d10 
= (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * (uint32_t)1U; - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]); - wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * (uint32_t)1U; - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]); - wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * (uint32_t)1U; - wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * (uint32_t)1U; - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]); - wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], (uint32_t)12U); - Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * (uint32_t)1U; - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]); - wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * (uint32_t)1U; - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]); - wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], (uint32_t)8U); - Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * (uint32_t)1U; - wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * (uint32_t)1U; - wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]); - wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], (uint32_t)7U); - Lib_IntVector_Intrinsics_vec128 *r11 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r22 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r32 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 v00 = r11[0U]; - Lib_IntVector_Intrinsics_vec128 - v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, (uint32_t)1U); - r11[0U] = v1; - Lib_IntVector_Intrinsics_vec128 v01 = r22[0U]; - Lib_IntVector_Intrinsics_vec128 - v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, (uint32_t)2U); - r22[0U] = v10; - Lib_IntVector_Intrinsics_vec128 v02 = r32[0U]; - Lib_IntVector_Intrinsics_vec128 - v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, (uint32_t)3U); - r32[0U] = v11; - uint32_t a0 = (uint32_t)0U; - uint32_t b = (uint32_t)1U; - uint32_t c = (uint32_t)2U; - uint32_t d1 = (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * (uint32_t)1U; - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]); - wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * (uint32_t)1U; - wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]); - wv_a8[0U] = 
Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * (uint32_t)1U; - wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * (uint32_t)1U; - wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]); - wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], (uint32_t)12U); - Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * (uint32_t)1U; - wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]); - wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * (uint32_t)1U; - wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]); - wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], (uint32_t)8U); - Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * (uint32_t)1U; - wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]); - Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * (uint32_t)1U; - wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]); - wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], (uint32_t)7U); - Lib_IntVector_Intrinsics_vec128 *r12 = wv + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U; - Lib_IntVector_Intrinsics_vec128 v0 = r12[0U]; - Lib_IntVector_Intrinsics_vec128 - v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, (uint32_t)3U); - r12[0U] = v12; - Lib_IntVector_Intrinsics_vec128 v03 = r2[0U]; - Lib_IntVector_Intrinsics_vec128 - v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, (uint32_t)2U); - r2[0U] = v13; - Lib_IntVector_Intrinsics_vec128 v04 = r3[0U]; - Lib_IntVector_Intrinsics_vec128 - v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, (uint32_t)1U); - r3[0U] = v14;); - s00 = hash; - s16 = hash + (uint32_t)1U; - r00 = wv; - r10 = wv + (uint32_t)1U; - r20 = wv + (uint32_t)2U; - r30 = wv + (uint32_t)3U; - s00[0U] = Lib_IntVector_Intrinsics_vec128_xor(s00[0U], r00[0U]); - s00[0U] = Lib_IntVector_Intrinsics_vec128_xor(s00[0U], r20[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec128_xor(s16[0U], r10[0U]); - s16[0U] = Lib_IntVector_Intrinsics_vec128_xor(s16[0U], r30[0U]); - } - } -} - -void -Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn) -{ - Lib_IntVector_Intrinsics_vec128 *r0 = hash; - Lib_IntVector_Intrinsics_vec128 *r1 = hash + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r2 = hash + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r3 = hash + (uint32_t)3U; - uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U]; - uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U]; - uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U]; - uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U]; - uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U]; - uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U]; - uint32_t iv6 = 
Hacl_Impl_Blake2_Constants_ivTable_S[6U]; - uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U]; - uint32_t kk_shift_8; - uint32_t iv0_; - r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3); - r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); - kk_shift_8 = kk << (uint32_t)8U; - iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn)); - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7); -} - -void -Hacl_Blake2s_128_blake2s_update_key( - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll -) -{ - uint64_t lb = (uint64_t)(uint32_t)64U; - uint8_t b[64U] = { 0U }; - memcpy(b, k, kk * sizeof (uint8_t)); - if (ll == (uint32_t)0U) - { - blake2s_update_block(wv, hash, true, lb, b); - } - else - { - blake2s_update_block(wv, hash, false, lb, b); - } - Lib_Memzero0_memzero(b, (uint32_t)64U * sizeof (b[0U])); -} - -void -Hacl_Blake2s_128_blake2s_update_multi( - uint32_t len, - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint64_t prev, - uint8_t *blocks, - uint32_t nb -) -{ - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U); - uint8_t *b = blocks + i * (uint32_t)64U; - blake2s_update_block(wv, hash, false, totlen, b); - } -} - -void -Hacl_Blake2s_128_blake2s_update_last( - uint32_t len, - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint64_t prev, - uint32_t rem, - uint8_t *d -) -{ - uint8_t b[64U] = { 0U }; - uint8_t *last = d + len - rem; - uint64_t totlen; - memcpy(b, last, rem * sizeof (uint8_t)); - totlen = prev + (uint64_t)len; - blake2s_update_block(wv, hash, true, totlen, b); - Lib_Memzero0_memzero(b, (uint32_t)64U * sizeof (b[0U])); -} - -static inline void -blake2s_update_blocks( - uint32_t len, - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint64_t prev, - uint8_t *blocks -) -{ - uint32_t nb0 = len / (uint32_t)64U; - uint32_t rem0 = len % (uint32_t)64U; - K___uint32_t_uint32_t scrut; - if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U) - { - uint32_t nb_ = nb0 - (uint32_t)1U; - uint32_t rem_ = (uint32_t)64U; - K___uint32_t_uint32_t lit; - lit.fst = nb_; - lit.snd = rem_; - scrut = lit; - } - else - { - K___uint32_t_uint32_t lit; - lit.fst = nb0; - lit.snd = rem0; - scrut = lit; - } - { - uint32_t nb = scrut.fst; - uint32_t rem = scrut.snd; - Hacl_Blake2s_128_blake2s_update_multi(len, wv, hash, prev, blocks, nb); - Hacl_Blake2s_128_blake2s_update_last(len, wv, hash, prev, rem, blocks); - } -} - -static inline void -blake2s_update( - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll, - uint8_t *d -) -{ - uint64_t lb = (uint64_t)(uint32_t)64U; - if (kk > (uint32_t)0U) - { - Hacl_Blake2s_128_blake2s_update_key(wv, hash, kk, k, ll); - if (!(ll == (uint32_t)0U)) - { - blake2s_update_blocks(ll, wv, hash, lb, d); - return; - } - return; - } - blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d); -} - -void -Hacl_Blake2s_128_blake2s_finish( - uint32_t nn, - uint8_t *output, - Lib_IntVector_Intrinsics_vec128 *hash -) -{ - uint32_t double_row = (uint32_t)32U; - KRML_CHECK_SIZE(sizeof (uint8_t), double_row); - { - uint8_t b[double_row]; - memset(b, 0U, double_row * sizeof (uint8_t)); - { - uint8_t *first = b; - uint8_t *second = b + 
(uint32_t)16U; - Lib_IntVector_Intrinsics_vec128 *row0 = hash; - Lib_IntVector_Intrinsics_vec128 *row1 = hash + (uint32_t)1U; - uint8_t *final; - Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]); - Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]); - final = b; - memcpy(output, final, nn * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, double_row * sizeof (b[0U])); - } - } -} - -void -Hacl_Blake2s_128_blake2s( - uint32_t nn, - uint8_t *output, - uint32_t ll, - uint8_t *d, - uint32_t kk, - uint8_t *k -) -{ - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b1[4U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Blake2s_128_blake2s_init(b, kk, nn); - blake2s_update(b1, b, kk, k, ll, d); - Hacl_Blake2s_128_blake2s_finish(nn, output, b); - Lib_Memzero0_memzero(b1, (uint32_t)4U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)4U * sizeof (b[0U])); -} - -void -Hacl_Blake2s_128_store_state128s_to_state32( - uint32_t *st32, - Lib_IntVector_Intrinsics_vec128 *st -) -{ - Lib_IntVector_Intrinsics_vec128 *r0 = st; - Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U; - uint32_t *b0 = st32; - uint32_t *b1 = st32 + (uint32_t)4U; - uint32_t *b2 = st32 + (uint32_t)8U; - uint32_t *b3 = st32 + (uint32_t)12U; - uint8_t b8[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec128_store32_le(b8, r0[0U]); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = b0; - uint8_t *bj = b8 + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - uint8_t b80[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = b1; - uint8_t *bj = b80 + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - uint8_t b81[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = b2; - uint8_t *bj = b81 + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - { - uint8_t b82[16U] = { 0U }; - Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = b3; - uint8_t *bj = b82 + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - } - } - } -} - -void -Hacl_Blake2s_128_load_state128s_from_state32( - Lib_IntVector_Intrinsics_vec128 *st, - uint32_t *st32 -) -{ - Lib_IntVector_Intrinsics_vec128 *r0 = st; - Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U; - Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U; - Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U; - uint32_t *b0 = st32; - uint32_t *b1 = st32 + (uint32_t)4U; - uint32_t *b2 = st32 + (uint32_t)8U; - uint32_t *b3 = st32 + (uint32_t)12U; - r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b0[0U], b0[1U], b0[2U], b0[3U]); - r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b1[0U], b1[1U], b1[2U], b1[3U]); - r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b2[0U], b2[1U], b2[2U], b2[3U]); - r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b3[0U], b3[1U], b3[2U], b3[3U]); -} - diff --git a/dist/c89-compatible/Hacl_Hash_Blake2s_128.h 
b/dist/c89-compatible/Hacl_Hash_Blake2s_128.h deleted file mode 100644 index 9ee59f873d..0000000000 --- a/dist/c89-compatible/Hacl_Hash_Blake2s_128.h +++ /dev/null @@ -1,109 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Hash_Blake2s_128_H -#define __Hacl_Hash_Blake2s_128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Impl_Blake2_Constants.h" -#include "evercrypt_targetconfig.h" -#include "libintvector.h" -void -Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn); - -void -Hacl_Blake2s_128_blake2s_update_key( - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint32_t kk, - uint8_t *k, - uint32_t ll -); - -void -Hacl_Blake2s_128_blake2s_update_multi( - uint32_t len, - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint64_t prev, - uint8_t *blocks, - uint32_t nb -); - -void -Hacl_Blake2s_128_blake2s_update_last( - uint32_t len, - Lib_IntVector_Intrinsics_vec128 *wv, - Lib_IntVector_Intrinsics_vec128 *hash, - uint64_t prev, - uint32_t rem, - uint8_t *d -); - -void -Hacl_Blake2s_128_blake2s_finish( - uint32_t nn, - uint8_t *output, - Lib_IntVector_Intrinsics_vec128 *hash -); - -void -Hacl_Blake2s_128_blake2s( - uint32_t nn, - uint8_t *output, - uint32_t ll, - uint8_t *d, - uint32_t kk, - uint8_t *k -); - -void -Hacl_Blake2s_128_store_state128s_to_state32( - uint32_t *st32, - Lib_IntVector_Intrinsics_vec128 *st -); - -void -Hacl_Blake2s_128_load_state128s_from_state32( - Lib_IntVector_Intrinsics_vec128 *st, - uint32_t *st32 -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Hash_Blake2s_128_H_DEFINED -#endif
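For reference while reviewing this removal: the header above is the public Blake2s-128 surface being deleted, and the usual entry point for consumers was the one-shot Hacl_Blake2s_128_blake2s. The caller below is a minimal sketch; only the function signature comes from the deleted header, while the message, the buffer sizes, the unkeyed mode (kk = 0), and main itself are illustrative. Passing NULL for the key is safe in this sketch because, as blake2s_update in the deleted .c file shows, the key pointer is only read when kk > 0.

#include <stdio.h>
#include <string.h>
#include "Hacl_Hash_Blake2s_128.h"

int main(void)
{
  const char *msg = "abc";       /* illustrative input */
  uint8_t digest[32U] = { 0U };  /* nn = 32 requests the full 256-bit digest */
  uint32_t i;
  /* argument order, per the header above: nn, output, ll, d, kk, k */
  Hacl_Blake2s_128_blake2s((uint32_t)32U, digest,
                           (uint32_t)strlen(msg), (uint8_t *)msg,
                           (uint32_t)0U, NULL);
  for (i = (uint32_t)0U; i < (uint32_t)32U; i++)
  {
    printf("%02x", digest[i]);
  }
  printf("\n");
  return 0;
}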
diff --git a/dist/c89-compatible/Hacl_Hash_MD5.c b/dist/c89-compatible/Hacl_Hash_MD5.c deleted file mode 100644 index 569445a3ac..0000000000 --- a/dist/c89-compatible/Hacl_Hash_MD5.c +++ /dev/null @@ -1,1731 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "internal/Hacl_Hash_MD5.h" - - - -static uint32_t -_h0[4U] = - { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U }; - -static uint32_t -_t[64U] = - { - (uint32_t)0xd76aa478U, (uint32_t)0xe8c7b756U, (uint32_t)0x242070dbU, (uint32_t)0xc1bdceeeU, - (uint32_t)0xf57c0fafU, (uint32_t)0x4787c62aU, (uint32_t)0xa8304613U, (uint32_t)0xfd469501U, - (uint32_t)0x698098d8U, (uint32_t)0x8b44f7afU, (uint32_t)0xffff5bb1U, (uint32_t)0x895cd7beU, - (uint32_t)0x6b901122U, (uint32_t)0xfd987193U, (uint32_t)0xa679438eU, (uint32_t)0x49b40821U, - (uint32_t)0xf61e2562U, (uint32_t)0xc040b340U, (uint32_t)0x265e5a51U, (uint32_t)0xe9b6c7aaU, - (uint32_t)0xd62f105dU, (uint32_t)0x02441453U, (uint32_t)0xd8a1e681U, (uint32_t)0xe7d3fbc8U, - (uint32_t)0x21e1cde6U, (uint32_t)0xc33707d6U, (uint32_t)0xf4d50d87U, (uint32_t)0x455a14edU, - (uint32_t)0xa9e3e905U, (uint32_t)0xfcefa3f8U, (uint32_t)0x676f02d9U, (uint32_t)0x8d2a4c8aU, - (uint32_t)0xfffa3942U, (uint32_t)0x8771f681U, (uint32_t)0x6d9d6122U, (uint32_t)0xfde5380cU, - (uint32_t)0xa4beea44U, (uint32_t)0x4bdecfa9U, (uint32_t)0xf6bb4b60U, (uint32_t)0xbebfbc70U, - (uint32_t)0x289b7ec6U, (uint32_t)0xeaa127faU, (uint32_t)0xd4ef3085U, (uint32_t)0x4881d05U, - (uint32_t)0xd9d4d039U, (uint32_t)0xe6db99e5U, (uint32_t)0x1fa27cf8U, (uint32_t)0xc4ac5665U, - (uint32_t)0xf4292244U, (uint32_t)0x432aff97U, (uint32_t)0xab9423a7U, (uint32_t)0xfc93a039U, - (uint32_t)0x655b59c3U, (uint32_t)0x8f0ccc92U, (uint32_t)0xffeff47dU, (uint32_t)0x85845dd1U, - (uint32_t)0x6fa87e4fU, (uint32_t)0xfe2ce6e0U, (uint32_t)0xa3014314U, (uint32_t)0x4e0811a1U, - (uint32_t)0xf7537e82U, (uint32_t)0xbd3af235U, (uint32_t)0x2ad7d2bbU, (uint32_t)0xeb86d391U - }; - -void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)4U; i++) - { - s[i] = _h0[i]; - } -} - -void Hacl_Hash_Core_MD5_legacy_update(uint32_t *abcd, uint8_t *x) -{ - uint32_t aa = abcd[0U]; - uint32_t bb = abcd[1U]; - uint32_t cc = abcd[2U]; - uint32_t dd = abcd[3U]; - uint32_t va0 = abcd[0U]; - uint32_t vb0 = abcd[1U]; - uint32_t vc0 = abcd[2U]; - uint32_t vd0 = abcd[3U]; - uint8_t *b0 = x; - uint32_t u0 = load32_le(b0); - uint32_t xk0 = u0; - uint32_t ti0 = _t[0U]; - uint32_t - v0 = - vb0 - + - ((va0 + ((vb0 & vc0) | (~vb0 & vd0)) + xk0 + ti0) - << (uint32_t)7U - | (va0 + ((vb0 & vc0) | (~vb0 & vd0)) + xk0 + ti0) >> (uint32_t)25U); - uint32_t va1; - uint32_t vb1; - uint32_t vc1; - uint32_t vd1; - uint8_t *b1; - uint32_t u1; - uint32_t xk1; - uint32_t ti1; - uint32_t v1; - uint32_t va2; - uint32_t vb2; - uint32_t vc2; - uint32_t vd2; - uint8_t *b2; - uint32_t u2; - uint32_t xk2; - uint32_t ti2; - uint32_t v2; -
uint32_t va3; - uint32_t vb3; - uint32_t vc3; - uint32_t vd3; - uint8_t *b3; - uint32_t u3; - uint32_t xk3; - uint32_t ti3; - uint32_t v3; - uint32_t va4; - uint32_t vb4; - uint32_t vc4; - uint32_t vd4; - uint8_t *b4; - uint32_t u4; - uint32_t xk4; - uint32_t ti4; - uint32_t v4; - uint32_t va5; - uint32_t vb5; - uint32_t vc5; - uint32_t vd5; - uint8_t *b5; - uint32_t u5; - uint32_t xk5; - uint32_t ti5; - uint32_t v5; - uint32_t va6; - uint32_t vb6; - uint32_t vc6; - uint32_t vd6; - uint8_t *b6; - uint32_t u6; - uint32_t xk6; - uint32_t ti6; - uint32_t v6; - uint32_t va7; - uint32_t vb7; - uint32_t vc7; - uint32_t vd7; - uint8_t *b7; - uint32_t u7; - uint32_t xk7; - uint32_t ti7; - uint32_t v7; - uint32_t va8; - uint32_t vb8; - uint32_t vc8; - uint32_t vd8; - uint8_t *b8; - uint32_t u8; - uint32_t xk8; - uint32_t ti8; - uint32_t v8; - uint32_t va9; - uint32_t vb9; - uint32_t vc9; - uint32_t vd9; - uint8_t *b9; - uint32_t u9; - uint32_t xk9; - uint32_t ti9; - uint32_t v9; - uint32_t va10; - uint32_t vb10; - uint32_t vc10; - uint32_t vd10; - uint8_t *b10; - uint32_t u10; - uint32_t xk10; - uint32_t ti10; - uint32_t v10; - uint32_t va11; - uint32_t vb11; - uint32_t vc11; - uint32_t vd11; - uint8_t *b11; - uint32_t u11; - uint32_t xk11; - uint32_t ti11; - uint32_t v11; - uint32_t va12; - uint32_t vb12; - uint32_t vc12; - uint32_t vd12; - uint8_t *b12; - uint32_t u12; - uint32_t xk12; - uint32_t ti12; - uint32_t v12; - uint32_t va13; - uint32_t vb13; - uint32_t vc13; - uint32_t vd13; - uint8_t *b13; - uint32_t u13; - uint32_t xk13; - uint32_t ti13; - uint32_t v13; - uint32_t va14; - uint32_t vb14; - uint32_t vc14; - uint32_t vd14; - uint8_t *b14; - uint32_t u14; - uint32_t xk14; - uint32_t ti14; - uint32_t v14; - uint32_t va15; - uint32_t vb15; - uint32_t vc15; - uint32_t vd15; - uint8_t *b15; - uint32_t u15; - uint32_t xk15; - uint32_t ti15; - uint32_t v15; - uint32_t va16; - uint32_t vb16; - uint32_t vc16; - uint32_t vd16; - uint8_t *b16; - uint32_t u16; - uint32_t xk16; - uint32_t ti16; - uint32_t v16; - uint32_t va17; - uint32_t vb17; - uint32_t vc17; - uint32_t vd17; - uint8_t *b17; - uint32_t u17; - uint32_t xk17; - uint32_t ti17; - uint32_t v17; - uint32_t va18; - uint32_t vb18; - uint32_t vc18; - uint32_t vd18; - uint8_t *b18; - uint32_t u18; - uint32_t xk18; - uint32_t ti18; - uint32_t v18; - uint32_t va19; - uint32_t vb19; - uint32_t vc19; - uint32_t vd19; - uint8_t *b19; - uint32_t u19; - uint32_t xk19; - uint32_t ti19; - uint32_t v19; - uint32_t va20; - uint32_t vb20; - uint32_t vc20; - uint32_t vd20; - uint8_t *b20; - uint32_t u20; - uint32_t xk20; - uint32_t ti20; - uint32_t v20; - uint32_t va21; - uint32_t vb21; - uint32_t vc21; - uint32_t vd21; - uint8_t *b21; - uint32_t u21; - uint32_t xk21; - uint32_t ti21; - uint32_t v21; - uint32_t va22; - uint32_t vb22; - uint32_t vc22; - uint32_t vd22; - uint8_t *b22; - uint32_t u22; - uint32_t xk22; - uint32_t ti22; - uint32_t v22; - uint32_t va23; - uint32_t vb23; - uint32_t vc23; - uint32_t vd23; - uint8_t *b23; - uint32_t u23; - uint32_t xk23; - uint32_t ti23; - uint32_t v23; - uint32_t va24; - uint32_t vb24; - uint32_t vc24; - uint32_t vd24; - uint8_t *b24; - uint32_t u24; - uint32_t xk24; - uint32_t ti24; - uint32_t v24; - uint32_t va25; - uint32_t vb25; - uint32_t vc25; - uint32_t vd25; - uint8_t *b25; - uint32_t u25; - uint32_t xk25; - uint32_t ti25; - uint32_t v25; - uint32_t va26; - uint32_t vb26; - uint32_t vc26; - uint32_t vd26; - uint8_t *b26; - uint32_t u26; - uint32_t xk26; - uint32_t ti26; - uint32_t v26; - uint32_t va27; - 
uint32_t vb27; - uint32_t vc27; - uint32_t vd27; - uint8_t *b27; - uint32_t u27; - uint32_t xk27; - uint32_t ti27; - uint32_t v27; - uint32_t va28; - uint32_t vb28; - uint32_t vc28; - uint32_t vd28; - uint8_t *b28; - uint32_t u28; - uint32_t xk28; - uint32_t ti28; - uint32_t v28; - uint32_t va29; - uint32_t vb29; - uint32_t vc29; - uint32_t vd29; - uint8_t *b29; - uint32_t u29; - uint32_t xk29; - uint32_t ti29; - uint32_t v29; - uint32_t va30; - uint32_t vb30; - uint32_t vc30; - uint32_t vd30; - uint8_t *b30; - uint32_t u30; - uint32_t xk30; - uint32_t ti30; - uint32_t v30; - uint32_t va31; - uint32_t vb31; - uint32_t vc31; - uint32_t vd31; - uint8_t *b31; - uint32_t u31; - uint32_t xk31; - uint32_t ti31; - uint32_t v31; - uint32_t va32; - uint32_t vb32; - uint32_t vc32; - uint32_t vd32; - uint8_t *b32; - uint32_t u32; - uint32_t xk32; - uint32_t ti32; - uint32_t v32; - uint32_t va33; - uint32_t vb33; - uint32_t vc33; - uint32_t vd33; - uint8_t *b33; - uint32_t u33; - uint32_t xk33; - uint32_t ti33; - uint32_t v33; - uint32_t va34; - uint32_t vb34; - uint32_t vc34; - uint32_t vd34; - uint8_t *b34; - uint32_t u34; - uint32_t xk34; - uint32_t ti34; - uint32_t v34; - uint32_t va35; - uint32_t vb35; - uint32_t vc35; - uint32_t vd35; - uint8_t *b35; - uint32_t u35; - uint32_t xk35; - uint32_t ti35; - uint32_t v35; - uint32_t va36; - uint32_t vb36; - uint32_t vc36; - uint32_t vd36; - uint8_t *b36; - uint32_t u36; - uint32_t xk36; - uint32_t ti36; - uint32_t v36; - uint32_t va37; - uint32_t vb37; - uint32_t vc37; - uint32_t vd37; - uint8_t *b37; - uint32_t u37; - uint32_t xk37; - uint32_t ti37; - uint32_t v37; - uint32_t va38; - uint32_t vb38; - uint32_t vc38; - uint32_t vd38; - uint8_t *b38; - uint32_t u38; - uint32_t xk38; - uint32_t ti38; - uint32_t v38; - uint32_t va39; - uint32_t vb39; - uint32_t vc39; - uint32_t vd39; - uint8_t *b39; - uint32_t u39; - uint32_t xk39; - uint32_t ti39; - uint32_t v39; - uint32_t va40; - uint32_t vb40; - uint32_t vc40; - uint32_t vd40; - uint8_t *b40; - uint32_t u40; - uint32_t xk40; - uint32_t ti40; - uint32_t v40; - uint32_t va41; - uint32_t vb41; - uint32_t vc41; - uint32_t vd41; - uint8_t *b41; - uint32_t u41; - uint32_t xk41; - uint32_t ti41; - uint32_t v41; - uint32_t va42; - uint32_t vb42; - uint32_t vc42; - uint32_t vd42; - uint8_t *b42; - uint32_t u42; - uint32_t xk42; - uint32_t ti42; - uint32_t v42; - uint32_t va43; - uint32_t vb43; - uint32_t vc43; - uint32_t vd43; - uint8_t *b43; - uint32_t u43; - uint32_t xk43; - uint32_t ti43; - uint32_t v43; - uint32_t va44; - uint32_t vb44; - uint32_t vc44; - uint32_t vd44; - uint8_t *b44; - uint32_t u44; - uint32_t xk44; - uint32_t ti44; - uint32_t v44; - uint32_t va45; - uint32_t vb45; - uint32_t vc45; - uint32_t vd45; - uint8_t *b45; - uint32_t u45; - uint32_t xk45; - uint32_t ti45; - uint32_t v45; - uint32_t va46; - uint32_t vb46; - uint32_t vc46; - uint32_t vd46; - uint8_t *b46; - uint32_t u46; - uint32_t xk46; - uint32_t ti46; - uint32_t v46; - uint32_t va47; - uint32_t vb47; - uint32_t vc47; - uint32_t vd47; - uint8_t *b47; - uint32_t u47; - uint32_t xk47; - uint32_t ti47; - uint32_t v47; - uint32_t va48; - uint32_t vb48; - uint32_t vc48; - uint32_t vd48; - uint8_t *b48; - uint32_t u48; - uint32_t xk48; - uint32_t ti48; - uint32_t v48; - uint32_t va49; - uint32_t vb49; - uint32_t vc49; - uint32_t vd49; - uint8_t *b49; - uint32_t u49; - uint32_t xk49; - uint32_t ti49; - uint32_t v49; - uint32_t va50; - uint32_t vb50; - uint32_t vc50; - uint32_t vd50; - uint8_t *b50; - uint32_t u50; - uint32_t xk50; - 
uint32_t ti50; - uint32_t v50; - uint32_t va51; - uint32_t vb51; - uint32_t vc51; - uint32_t vd51; - uint8_t *b51; - uint32_t u51; - uint32_t xk51; - uint32_t ti51; - uint32_t v51; - uint32_t va52; - uint32_t vb52; - uint32_t vc52; - uint32_t vd52; - uint8_t *b52; - uint32_t u52; - uint32_t xk52; - uint32_t ti52; - uint32_t v52; - uint32_t va53; - uint32_t vb53; - uint32_t vc53; - uint32_t vd53; - uint8_t *b53; - uint32_t u53; - uint32_t xk53; - uint32_t ti53; - uint32_t v53; - uint32_t va54; - uint32_t vb54; - uint32_t vc54; - uint32_t vd54; - uint8_t *b54; - uint32_t u54; - uint32_t xk54; - uint32_t ti54; - uint32_t v54; - uint32_t va55; - uint32_t vb55; - uint32_t vc55; - uint32_t vd55; - uint8_t *b55; - uint32_t u55; - uint32_t xk55; - uint32_t ti55; - uint32_t v55; - uint32_t va56; - uint32_t vb56; - uint32_t vc56; - uint32_t vd56; - uint8_t *b56; - uint32_t u56; - uint32_t xk56; - uint32_t ti56; - uint32_t v56; - uint32_t va57; - uint32_t vb57; - uint32_t vc57; - uint32_t vd57; - uint8_t *b57; - uint32_t u57; - uint32_t xk57; - uint32_t ti57; - uint32_t v57; - uint32_t va58; - uint32_t vb58; - uint32_t vc58; - uint32_t vd58; - uint8_t *b58; - uint32_t u58; - uint32_t xk58; - uint32_t ti58; - uint32_t v58; - uint32_t va59; - uint32_t vb59; - uint32_t vc59; - uint32_t vd59; - uint8_t *b59; - uint32_t u59; - uint32_t xk59; - uint32_t ti59; - uint32_t v59; - uint32_t va60; - uint32_t vb60; - uint32_t vc60; - uint32_t vd60; - uint8_t *b60; - uint32_t u60; - uint32_t xk60; - uint32_t ti60; - uint32_t v60; - uint32_t va61; - uint32_t vb61; - uint32_t vc61; - uint32_t vd61; - uint8_t *b61; - uint32_t u61; - uint32_t xk61; - uint32_t ti61; - uint32_t v61; - uint32_t va62; - uint32_t vb62; - uint32_t vc62; - uint32_t vd62; - uint8_t *b62; - uint32_t u62; - uint32_t xk62; - uint32_t ti62; - uint32_t v62; - uint32_t va; - uint32_t vb; - uint32_t vc; - uint32_t vd; - uint8_t *b63; - uint32_t u; - uint32_t xk; - uint32_t ti; - uint32_t v; - uint32_t a; - uint32_t b; - uint32_t c; - uint32_t d; - abcd[0U] = v0; - va1 = abcd[3U]; - vb1 = abcd[0U]; - vc1 = abcd[1U]; - vd1 = abcd[2U]; - b1 = x + (uint32_t)4U; - u1 = load32_le(b1); - xk1 = u1; - ti1 = _t[1U]; - v1 = - vb1 - + - ((va1 + ((vb1 & vc1) | (~vb1 & vd1)) + xk1 + ti1) - << (uint32_t)12U - | (va1 + ((vb1 & vc1) | (~vb1 & vd1)) + xk1 + ti1) >> (uint32_t)20U); - abcd[3U] = v1; - va2 = abcd[2U]; - vb2 = abcd[3U]; - vc2 = abcd[0U]; - vd2 = abcd[1U]; - b2 = x + (uint32_t)8U; - u2 = load32_le(b2); - xk2 = u2; - ti2 = _t[2U]; - v2 = - vb2 - + - ((va2 + ((vb2 & vc2) | (~vb2 & vd2)) + xk2 + ti2) - << (uint32_t)17U - | (va2 + ((vb2 & vc2) | (~vb2 & vd2)) + xk2 + ti2) >> (uint32_t)15U); - abcd[2U] = v2; - va3 = abcd[1U]; - vb3 = abcd[2U]; - vc3 = abcd[3U]; - vd3 = abcd[0U]; - b3 = x + (uint32_t)12U; - u3 = load32_le(b3); - xk3 = u3; - ti3 = _t[3U]; - v3 = - vb3 - + - ((va3 + ((vb3 & vc3) | (~vb3 & vd3)) + xk3 + ti3) - << (uint32_t)22U - | (va3 + ((vb3 & vc3) | (~vb3 & vd3)) + xk3 + ti3) >> (uint32_t)10U); - abcd[1U] = v3; - va4 = abcd[0U]; - vb4 = abcd[1U]; - vc4 = abcd[2U]; - vd4 = abcd[3U]; - b4 = x + (uint32_t)16U; - u4 = load32_le(b4); - xk4 = u4; - ti4 = _t[4U]; - v4 = - vb4 - + - ((va4 + ((vb4 & vc4) | (~vb4 & vd4)) + xk4 + ti4) - << (uint32_t)7U - | (va4 + ((vb4 & vc4) | (~vb4 & vd4)) + xk4 + ti4) >> (uint32_t)25U); - abcd[0U] = v4; - va5 = abcd[3U]; - vb5 = abcd[0U]; - vc5 = abcd[1U]; - vd5 = abcd[2U]; - b5 = x + (uint32_t)20U; - u5 = load32_le(b5); - xk5 = u5; - ti5 = _t[5U]; - v5 = - vb5 - + - ((va5 + ((vb5 & vc5) | (~vb5 & vd5)) + xk5 + ti5) - 
<< (uint32_t)12U - | (va5 + ((vb5 & vc5) | (~vb5 & vd5)) + xk5 + ti5) >> (uint32_t)20U); - abcd[3U] = v5; - va6 = abcd[2U]; - vb6 = abcd[3U]; - vc6 = abcd[0U]; - vd6 = abcd[1U]; - b6 = x + (uint32_t)24U; - u6 = load32_le(b6); - xk6 = u6; - ti6 = _t[6U]; - v6 = - vb6 - + - ((va6 + ((vb6 & vc6) | (~vb6 & vd6)) + xk6 + ti6) - << (uint32_t)17U - | (va6 + ((vb6 & vc6) | (~vb6 & vd6)) + xk6 + ti6) >> (uint32_t)15U); - abcd[2U] = v6; - va7 = abcd[1U]; - vb7 = abcd[2U]; - vc7 = abcd[3U]; - vd7 = abcd[0U]; - b7 = x + (uint32_t)28U; - u7 = load32_le(b7); - xk7 = u7; - ti7 = _t[7U]; - v7 = - vb7 - + - ((va7 + ((vb7 & vc7) | (~vb7 & vd7)) + xk7 + ti7) - << (uint32_t)22U - | (va7 + ((vb7 & vc7) | (~vb7 & vd7)) + xk7 + ti7) >> (uint32_t)10U); - abcd[1U] = v7; - va8 = abcd[0U]; - vb8 = abcd[1U]; - vc8 = abcd[2U]; - vd8 = abcd[3U]; - b8 = x + (uint32_t)32U; - u8 = load32_le(b8); - xk8 = u8; - ti8 = _t[8U]; - v8 = - vb8 - + - ((va8 + ((vb8 & vc8) | (~vb8 & vd8)) + xk8 + ti8) - << (uint32_t)7U - | (va8 + ((vb8 & vc8) | (~vb8 & vd8)) + xk8 + ti8) >> (uint32_t)25U); - abcd[0U] = v8; - va9 = abcd[3U]; - vb9 = abcd[0U]; - vc9 = abcd[1U]; - vd9 = abcd[2U]; - b9 = x + (uint32_t)36U; - u9 = load32_le(b9); - xk9 = u9; - ti9 = _t[9U]; - v9 = - vb9 - + - ((va9 + ((vb9 & vc9) | (~vb9 & vd9)) + xk9 + ti9) - << (uint32_t)12U - | (va9 + ((vb9 & vc9) | (~vb9 & vd9)) + xk9 + ti9) >> (uint32_t)20U); - abcd[3U] = v9; - va10 = abcd[2U]; - vb10 = abcd[3U]; - vc10 = abcd[0U]; - vd10 = abcd[1U]; - b10 = x + (uint32_t)40U; - u10 = load32_le(b10); - xk10 = u10; - ti10 = _t[10U]; - v10 = - vb10 - + - ((va10 + ((vb10 & vc10) | (~vb10 & vd10)) + xk10 + ti10) - << (uint32_t)17U - | (va10 + ((vb10 & vc10) | (~vb10 & vd10)) + xk10 + ti10) >> (uint32_t)15U); - abcd[2U] = v10; - va11 = abcd[1U]; - vb11 = abcd[2U]; - vc11 = abcd[3U]; - vd11 = abcd[0U]; - b11 = x + (uint32_t)44U; - u11 = load32_le(b11); - xk11 = u11; - ti11 = _t[11U]; - v11 = - vb11 - + - ((va11 + ((vb11 & vc11) | (~vb11 & vd11)) + xk11 + ti11) - << (uint32_t)22U - | (va11 + ((vb11 & vc11) | (~vb11 & vd11)) + xk11 + ti11) >> (uint32_t)10U); - abcd[1U] = v11; - va12 = abcd[0U]; - vb12 = abcd[1U]; - vc12 = abcd[2U]; - vd12 = abcd[3U]; - b12 = x + (uint32_t)48U; - u12 = load32_le(b12); - xk12 = u12; - ti12 = _t[12U]; - v12 = - vb12 - + - ((va12 + ((vb12 & vc12) | (~vb12 & vd12)) + xk12 + ti12) - << (uint32_t)7U - | (va12 + ((vb12 & vc12) | (~vb12 & vd12)) + xk12 + ti12) >> (uint32_t)25U); - abcd[0U] = v12; - va13 = abcd[3U]; - vb13 = abcd[0U]; - vc13 = abcd[1U]; - vd13 = abcd[2U]; - b13 = x + (uint32_t)52U; - u13 = load32_le(b13); - xk13 = u13; - ti13 = _t[13U]; - v13 = - vb13 - + - ((va13 + ((vb13 & vc13) | (~vb13 & vd13)) + xk13 + ti13) - << (uint32_t)12U - | (va13 + ((vb13 & vc13) | (~vb13 & vd13)) + xk13 + ti13) >> (uint32_t)20U); - abcd[3U] = v13; - va14 = abcd[2U]; - vb14 = abcd[3U]; - vc14 = abcd[0U]; - vd14 = abcd[1U]; - b14 = x + (uint32_t)56U; - u14 = load32_le(b14); - xk14 = u14; - ti14 = _t[14U]; - v14 = - vb14 - + - ((va14 + ((vb14 & vc14) | (~vb14 & vd14)) + xk14 + ti14) - << (uint32_t)17U - | (va14 + ((vb14 & vc14) | (~vb14 & vd14)) + xk14 + ti14) >> (uint32_t)15U); - abcd[2U] = v14; - va15 = abcd[1U]; - vb15 = abcd[2U]; - vc15 = abcd[3U]; - vd15 = abcd[0U]; - b15 = x + (uint32_t)60U; - u15 = load32_le(b15); - xk15 = u15; - ti15 = _t[15U]; - v15 = - vb15 - + - ((va15 + ((vb15 & vc15) | (~vb15 & vd15)) + xk15 + ti15) - << (uint32_t)22U - | (va15 + ((vb15 & vc15) | (~vb15 & vd15)) + xk15 + ti15) >> (uint32_t)10U); - abcd[1U] = v15; - va16 = abcd[0U]; - vb16 = 
abcd[1U]; - vc16 = abcd[2U]; - vd16 = abcd[3U]; - b16 = x + (uint32_t)4U; - u16 = load32_le(b16); - xk16 = u16; - ti16 = _t[16U]; - v16 = - vb16 - + - ((va16 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk16 + ti16) - << (uint32_t)5U - | (va16 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk16 + ti16) >> (uint32_t)27U); - abcd[0U] = v16; - va17 = abcd[3U]; - vb17 = abcd[0U]; - vc17 = abcd[1U]; - vd17 = abcd[2U]; - b17 = x + (uint32_t)24U; - u17 = load32_le(b17); - xk17 = u17; - ti17 = _t[17U]; - v17 = - vb17 - + - ((va17 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk17 + ti17) - << (uint32_t)9U - | (va17 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk17 + ti17) >> (uint32_t)23U); - abcd[3U] = v17; - va18 = abcd[2U]; - vb18 = abcd[3U]; - vc18 = abcd[0U]; - vd18 = abcd[1U]; - b18 = x + (uint32_t)44U; - u18 = load32_le(b18); - xk18 = u18; - ti18 = _t[18U]; - v18 = - vb18 - + - ((va18 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk18 + ti18) - << (uint32_t)14U - | (va18 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk18 + ti18) >> (uint32_t)18U); - abcd[2U] = v18; - va19 = abcd[1U]; - vb19 = abcd[2U]; - vc19 = abcd[3U]; - vd19 = abcd[0U]; - b19 = x; - u19 = load32_le(b19); - xk19 = u19; - ti19 = _t[19U]; - v19 = - vb19 - + - ((va19 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk19 + ti19) - << (uint32_t)20U - | (va19 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk19 + ti19) >> (uint32_t)12U); - abcd[1U] = v19; - va20 = abcd[0U]; - vb20 = abcd[1U]; - vc20 = abcd[2U]; - vd20 = abcd[3U]; - b20 = x + (uint32_t)20U; - u20 = load32_le(b20); - xk20 = u20; - ti20 = _t[20U]; - v20 = - vb20 - + - ((va20 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk20 + ti20) - << (uint32_t)5U - | (va20 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk20 + ti20) >> (uint32_t)27U); - abcd[0U] = v20; - va21 = abcd[3U]; - vb21 = abcd[0U]; - vc21 = abcd[1U]; - vd21 = abcd[2U]; - b21 = x + (uint32_t)40U; - u21 = load32_le(b21); - xk21 = u21; - ti21 = _t[21U]; - v21 = - vb21 - + - ((va21 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk21 + ti21) - << (uint32_t)9U - | (va21 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk21 + ti21) >> (uint32_t)23U); - abcd[3U] = v21; - va22 = abcd[2U]; - vb22 = abcd[3U]; - vc22 = abcd[0U]; - vd22 = abcd[1U]; - b22 = x + (uint32_t)60U; - u22 = load32_le(b22); - xk22 = u22; - ti22 = _t[22U]; - v22 = - vb22 - + - ((va22 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk22 + ti22) - << (uint32_t)14U - | (va22 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk22 + ti22) >> (uint32_t)18U); - abcd[2U] = v22; - va23 = abcd[1U]; - vb23 = abcd[2U]; - vc23 = abcd[3U]; - vd23 = abcd[0U]; - b23 = x + (uint32_t)16U; - u23 = load32_le(b23); - xk23 = u23; - ti23 = _t[23U]; - v23 = - vb23 - + - ((va23 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk23 + ti23) - << (uint32_t)20U - | (va23 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk23 + ti23) >> (uint32_t)12U); - abcd[1U] = v23; - va24 = abcd[0U]; - vb24 = abcd[1U]; - vc24 = abcd[2U]; - vd24 = abcd[3U]; - b24 = x + (uint32_t)36U; - u24 = load32_le(b24); - xk24 = u24; - ti24 = _t[24U]; - v24 = - vb24 - + - ((va24 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk24 + ti24) - << (uint32_t)5U - | (va24 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk24 + ti24) >> (uint32_t)27U); - abcd[0U] = v24; - va25 = abcd[3U]; - vb25 = abcd[0U]; - vc25 = abcd[1U]; - vd25 = abcd[2U]; - b25 = x + (uint32_t)56U; - u25 = load32_le(b25); - xk25 = u25; - ti25 = _t[25U]; - v25 = - vb25 - + - ((va25 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk25 + ti25) - << (uint32_t)9U - | (va25 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk25 + ti25) >> (uint32_t)23U); - abcd[3U] = v25; - va26 = abcd[2U]; - vb26 = abcd[3U]; - vc26 = abcd[0U]; - vd26 = abcd[1U]; 
- b26 = x + (uint32_t)12U; - u26 = load32_le(b26); - xk26 = u26; - ti26 = _t[26U]; - v26 = - vb26 - + - ((va26 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk26 + ti26) - << (uint32_t)14U - | (va26 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk26 + ti26) >> (uint32_t)18U); - abcd[2U] = v26; - va27 = abcd[1U]; - vb27 = abcd[2U]; - vc27 = abcd[3U]; - vd27 = abcd[0U]; - b27 = x + (uint32_t)32U; - u27 = load32_le(b27); - xk27 = u27; - ti27 = _t[27U]; - v27 = - vb27 - + - ((va27 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk27 + ti27) - << (uint32_t)20U - | (va27 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk27 + ti27) >> (uint32_t)12U); - abcd[1U] = v27; - va28 = abcd[0U]; - vb28 = abcd[1U]; - vc28 = abcd[2U]; - vd28 = abcd[3U]; - b28 = x + (uint32_t)52U; - u28 = load32_le(b28); - xk28 = u28; - ti28 = _t[28U]; - v28 = - vb28 - + - ((va28 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk28 + ti28) - << (uint32_t)5U - | (va28 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk28 + ti28) >> (uint32_t)27U); - abcd[0U] = v28; - va29 = abcd[3U]; - vb29 = abcd[0U]; - vc29 = abcd[1U]; - vd29 = abcd[2U]; - b29 = x + (uint32_t)8U; - u29 = load32_le(b29); - xk29 = u29; - ti29 = _t[29U]; - v29 = - vb29 - + - ((va29 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk29 + ti29) - << (uint32_t)9U - | (va29 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk29 + ti29) >> (uint32_t)23U); - abcd[3U] = v29; - va30 = abcd[2U]; - vb30 = abcd[3U]; - vc30 = abcd[0U]; - vd30 = abcd[1U]; - b30 = x + (uint32_t)28U; - u30 = load32_le(b30); - xk30 = u30; - ti30 = _t[30U]; - v30 = - vb30 - + - ((va30 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk30 + ti30) - << (uint32_t)14U - | (va30 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk30 + ti30) >> (uint32_t)18U); - abcd[2U] = v30; - va31 = abcd[1U]; - vb31 = abcd[2U]; - vc31 = abcd[3U]; - vd31 = abcd[0U]; - b31 = x + (uint32_t)48U; - u31 = load32_le(b31); - xk31 = u31; - ti31 = _t[31U]; - v31 = - vb31 - + - ((va31 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk31 + ti31) - << (uint32_t)20U - | (va31 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk31 + ti31) >> (uint32_t)12U); - abcd[1U] = v31; - va32 = abcd[0U]; - vb32 = abcd[1U]; - vc32 = abcd[2U]; - vd32 = abcd[3U]; - b32 = x + (uint32_t)20U; - u32 = load32_le(b32); - xk32 = u32; - ti32 = _t[32U]; - v32 = - vb32 - + - ((va32 + (vb32 ^ (vc32 ^ vd32)) + xk32 + ti32) - << (uint32_t)4U - | (va32 + (vb32 ^ (vc32 ^ vd32)) + xk32 + ti32) >> (uint32_t)28U); - abcd[0U] = v32; - va33 = abcd[3U]; - vb33 = abcd[0U]; - vc33 = abcd[1U]; - vd33 = abcd[2U]; - b33 = x + (uint32_t)32U; - u33 = load32_le(b33); - xk33 = u33; - ti33 = _t[33U]; - v33 = - vb33 - + - ((va33 + (vb33 ^ (vc33 ^ vd33)) + xk33 + ti33) - << (uint32_t)11U - | (va33 + (vb33 ^ (vc33 ^ vd33)) + xk33 + ti33) >> (uint32_t)21U); - abcd[3U] = v33; - va34 = abcd[2U]; - vb34 = abcd[3U]; - vc34 = abcd[0U]; - vd34 = abcd[1U]; - b34 = x + (uint32_t)44U; - u34 = load32_le(b34); - xk34 = u34; - ti34 = _t[34U]; - v34 = - vb34 - + - ((va34 + (vb34 ^ (vc34 ^ vd34)) + xk34 + ti34) - << (uint32_t)16U - | (va34 + (vb34 ^ (vc34 ^ vd34)) + xk34 + ti34) >> (uint32_t)16U); - abcd[2U] = v34; - va35 = abcd[1U]; - vb35 = abcd[2U]; - vc35 = abcd[3U]; - vd35 = abcd[0U]; - b35 = x + (uint32_t)56U; - u35 = load32_le(b35); - xk35 = u35; - ti35 = _t[35U]; - v35 = - vb35 - + - ((va35 + (vb35 ^ (vc35 ^ vd35)) + xk35 + ti35) - << (uint32_t)23U - | (va35 + (vb35 ^ (vc35 ^ vd35)) + xk35 + ti35) >> (uint32_t)9U); - abcd[1U] = v35; - va36 = abcd[0U]; - vb36 = abcd[1U]; - vc36 = abcd[2U]; - vd36 = abcd[3U]; - b36 = x + (uint32_t)4U; - u36 = load32_le(b36); - xk36 = u36; - ti36 = _t[36U]; - v36 = - vb36 - + - ((va36 
+ (vb36 ^ (vc36 ^ vd36)) + xk36 + ti36) - << (uint32_t)4U - | (va36 + (vb36 ^ (vc36 ^ vd36)) + xk36 + ti36) >> (uint32_t)28U); - abcd[0U] = v36; - va37 = abcd[3U]; - vb37 = abcd[0U]; - vc37 = abcd[1U]; - vd37 = abcd[2U]; - b37 = x + (uint32_t)16U; - u37 = load32_le(b37); - xk37 = u37; - ti37 = _t[37U]; - v37 = - vb37 - + - ((va37 + (vb37 ^ (vc37 ^ vd37)) + xk37 + ti37) - << (uint32_t)11U - | (va37 + (vb37 ^ (vc37 ^ vd37)) + xk37 + ti37) >> (uint32_t)21U); - abcd[3U] = v37; - va38 = abcd[2U]; - vb38 = abcd[3U]; - vc38 = abcd[0U]; - vd38 = abcd[1U]; - b38 = x + (uint32_t)28U; - u38 = load32_le(b38); - xk38 = u38; - ti38 = _t[38U]; - v38 = - vb38 - + - ((va38 + (vb38 ^ (vc38 ^ vd38)) + xk38 + ti38) - << (uint32_t)16U - | (va38 + (vb38 ^ (vc38 ^ vd38)) + xk38 + ti38) >> (uint32_t)16U); - abcd[2U] = v38; - va39 = abcd[1U]; - vb39 = abcd[2U]; - vc39 = abcd[3U]; - vd39 = abcd[0U]; - b39 = x + (uint32_t)40U; - u39 = load32_le(b39); - xk39 = u39; - ti39 = _t[39U]; - v39 = - vb39 - + - ((va39 + (vb39 ^ (vc39 ^ vd39)) + xk39 + ti39) - << (uint32_t)23U - | (va39 + (vb39 ^ (vc39 ^ vd39)) + xk39 + ti39) >> (uint32_t)9U); - abcd[1U] = v39; - va40 = abcd[0U]; - vb40 = abcd[1U]; - vc40 = abcd[2U]; - vd40 = abcd[3U]; - b40 = x + (uint32_t)52U; - u40 = load32_le(b40); - xk40 = u40; - ti40 = _t[40U]; - v40 = - vb40 - + - ((va40 + (vb40 ^ (vc40 ^ vd40)) + xk40 + ti40) - << (uint32_t)4U - | (va40 + (vb40 ^ (vc40 ^ vd40)) + xk40 + ti40) >> (uint32_t)28U); - abcd[0U] = v40; - va41 = abcd[3U]; - vb41 = abcd[0U]; - vc41 = abcd[1U]; - vd41 = abcd[2U]; - b41 = x; - u41 = load32_le(b41); - xk41 = u41; - ti41 = _t[41U]; - v41 = - vb41 - + - ((va41 + (vb41 ^ (vc41 ^ vd41)) + xk41 + ti41) - << (uint32_t)11U - | (va41 + (vb41 ^ (vc41 ^ vd41)) + xk41 + ti41) >> (uint32_t)21U); - abcd[3U] = v41; - va42 = abcd[2U]; - vb42 = abcd[3U]; - vc42 = abcd[0U]; - vd42 = abcd[1U]; - b42 = x + (uint32_t)12U; - u42 = load32_le(b42); - xk42 = u42; - ti42 = _t[42U]; - v42 = - vb42 - + - ((va42 + (vb42 ^ (vc42 ^ vd42)) + xk42 + ti42) - << (uint32_t)16U - | (va42 + (vb42 ^ (vc42 ^ vd42)) + xk42 + ti42) >> (uint32_t)16U); - abcd[2U] = v42; - va43 = abcd[1U]; - vb43 = abcd[2U]; - vc43 = abcd[3U]; - vd43 = abcd[0U]; - b43 = x + (uint32_t)24U; - u43 = load32_le(b43); - xk43 = u43; - ti43 = _t[43U]; - v43 = - vb43 - + - ((va43 + (vb43 ^ (vc43 ^ vd43)) + xk43 + ti43) - << (uint32_t)23U - | (va43 + (vb43 ^ (vc43 ^ vd43)) + xk43 + ti43) >> (uint32_t)9U); - abcd[1U] = v43; - va44 = abcd[0U]; - vb44 = abcd[1U]; - vc44 = abcd[2U]; - vd44 = abcd[3U]; - b44 = x + (uint32_t)36U; - u44 = load32_le(b44); - xk44 = u44; - ti44 = _t[44U]; - v44 = - vb44 - + - ((va44 + (vb44 ^ (vc44 ^ vd44)) + xk44 + ti44) - << (uint32_t)4U - | (va44 + (vb44 ^ (vc44 ^ vd44)) + xk44 + ti44) >> (uint32_t)28U); - abcd[0U] = v44; - va45 = abcd[3U]; - vb45 = abcd[0U]; - vc45 = abcd[1U]; - vd45 = abcd[2U]; - b45 = x + (uint32_t)48U; - u45 = load32_le(b45); - xk45 = u45; - ti45 = _t[45U]; - v45 = - vb45 - + - ((va45 + (vb45 ^ (vc45 ^ vd45)) + xk45 + ti45) - << (uint32_t)11U - | (va45 + (vb45 ^ (vc45 ^ vd45)) + xk45 + ti45) >> (uint32_t)21U); - abcd[3U] = v45; - va46 = abcd[2U]; - vb46 = abcd[3U]; - vc46 = abcd[0U]; - vd46 = abcd[1U]; - b46 = x + (uint32_t)60U; - u46 = load32_le(b46); - xk46 = u46; - ti46 = _t[46U]; - v46 = - vb46 - + - ((va46 + (vb46 ^ (vc46 ^ vd46)) + xk46 + ti46) - << (uint32_t)16U - | (va46 + (vb46 ^ (vc46 ^ vd46)) + xk46 + ti46) >> (uint32_t)16U); - abcd[2U] = v46; - va47 = abcd[1U]; - vb47 = abcd[2U]; - vc47 = abcd[3U]; - vd47 = abcd[0U]; - b47 = x + 
(uint32_t)8U; - u47 = load32_le(b47); - xk47 = u47; - ti47 = _t[47U]; - v47 = - vb47 - + - ((va47 + (vb47 ^ (vc47 ^ vd47)) + xk47 + ti47) - << (uint32_t)23U - | (va47 + (vb47 ^ (vc47 ^ vd47)) + xk47 + ti47) >> (uint32_t)9U); - abcd[1U] = v47; - va48 = abcd[0U]; - vb48 = abcd[1U]; - vc48 = abcd[2U]; - vd48 = abcd[3U]; - b48 = x; - u48 = load32_le(b48); - xk48 = u48; - ti48 = _t[48U]; - v48 = - vb48 - + - ((va48 + (vc48 ^ (vb48 | ~vd48)) + xk48 + ti48) - << (uint32_t)6U - | (va48 + (vc48 ^ (vb48 | ~vd48)) + xk48 + ti48) >> (uint32_t)26U); - abcd[0U] = v48; - va49 = abcd[3U]; - vb49 = abcd[0U]; - vc49 = abcd[1U]; - vd49 = abcd[2U]; - b49 = x + (uint32_t)28U; - u49 = load32_le(b49); - xk49 = u49; - ti49 = _t[49U]; - v49 = - vb49 - + - ((va49 + (vc49 ^ (vb49 | ~vd49)) + xk49 + ti49) - << (uint32_t)10U - | (va49 + (vc49 ^ (vb49 | ~vd49)) + xk49 + ti49) >> (uint32_t)22U); - abcd[3U] = v49; - va50 = abcd[2U]; - vb50 = abcd[3U]; - vc50 = abcd[0U]; - vd50 = abcd[1U]; - b50 = x + (uint32_t)56U; - u50 = load32_le(b50); - xk50 = u50; - ti50 = _t[50U]; - v50 = - vb50 - + - ((va50 + (vc50 ^ (vb50 | ~vd50)) + xk50 + ti50) - << (uint32_t)15U - | (va50 + (vc50 ^ (vb50 | ~vd50)) + xk50 + ti50) >> (uint32_t)17U); - abcd[2U] = v50; - va51 = abcd[1U]; - vb51 = abcd[2U]; - vc51 = abcd[3U]; - vd51 = abcd[0U]; - b51 = x + (uint32_t)20U; - u51 = load32_le(b51); - xk51 = u51; - ti51 = _t[51U]; - v51 = - vb51 - + - ((va51 + (vc51 ^ (vb51 | ~vd51)) + xk51 + ti51) - << (uint32_t)21U - | (va51 + (vc51 ^ (vb51 | ~vd51)) + xk51 + ti51) >> (uint32_t)11U); - abcd[1U] = v51; - va52 = abcd[0U]; - vb52 = abcd[1U]; - vc52 = abcd[2U]; - vd52 = abcd[3U]; - b52 = x + (uint32_t)48U; - u52 = load32_le(b52); - xk52 = u52; - ti52 = _t[52U]; - v52 = - vb52 - + - ((va52 + (vc52 ^ (vb52 | ~vd52)) + xk52 + ti52) - << (uint32_t)6U - | (va52 + (vc52 ^ (vb52 | ~vd52)) + xk52 + ti52) >> (uint32_t)26U); - abcd[0U] = v52; - va53 = abcd[3U]; - vb53 = abcd[0U]; - vc53 = abcd[1U]; - vd53 = abcd[2U]; - b53 = x + (uint32_t)12U; - u53 = load32_le(b53); - xk53 = u53; - ti53 = _t[53U]; - v53 = - vb53 - + - ((va53 + (vc53 ^ (vb53 | ~vd53)) + xk53 + ti53) - << (uint32_t)10U - | (va53 + (vc53 ^ (vb53 | ~vd53)) + xk53 + ti53) >> (uint32_t)22U); - abcd[3U] = v53; - va54 = abcd[2U]; - vb54 = abcd[3U]; - vc54 = abcd[0U]; - vd54 = abcd[1U]; - b54 = x + (uint32_t)40U; - u54 = load32_le(b54); - xk54 = u54; - ti54 = _t[54U]; - v54 = - vb54 - + - ((va54 + (vc54 ^ (vb54 | ~vd54)) + xk54 + ti54) - << (uint32_t)15U - | (va54 + (vc54 ^ (vb54 | ~vd54)) + xk54 + ti54) >> (uint32_t)17U); - abcd[2U] = v54; - va55 = abcd[1U]; - vb55 = abcd[2U]; - vc55 = abcd[3U]; - vd55 = abcd[0U]; - b55 = x + (uint32_t)4U; - u55 = load32_le(b55); - xk55 = u55; - ti55 = _t[55U]; - v55 = - vb55 - + - ((va55 + (vc55 ^ (vb55 | ~vd55)) + xk55 + ti55) - << (uint32_t)21U - | (va55 + (vc55 ^ (vb55 | ~vd55)) + xk55 + ti55) >> (uint32_t)11U); - abcd[1U] = v55; - va56 = abcd[0U]; - vb56 = abcd[1U]; - vc56 = abcd[2U]; - vd56 = abcd[3U]; - b56 = x + (uint32_t)32U; - u56 = load32_le(b56); - xk56 = u56; - ti56 = _t[56U]; - v56 = - vb56 - + - ((va56 + (vc56 ^ (vb56 | ~vd56)) + xk56 + ti56) - << (uint32_t)6U - | (va56 + (vc56 ^ (vb56 | ~vd56)) + xk56 + ti56) >> (uint32_t)26U); - abcd[0U] = v56; - va57 = abcd[3U]; - vb57 = abcd[0U]; - vc57 = abcd[1U]; - vd57 = abcd[2U]; - b57 = x + (uint32_t)60U; - u57 = load32_le(b57); - xk57 = u57; - ti57 = _t[57U]; - v57 = - vb57 - + - ((va57 + (vc57 ^ (vb57 | ~vd57)) + xk57 + ti57) - << (uint32_t)10U - | (va57 + (vc57 ^ (vb57 | ~vd57)) + xk57 + ti57) >> (uint32_t)22U); 
- abcd[3U] = v57; - va58 = abcd[2U]; - vb58 = abcd[3U]; - vc58 = abcd[0U]; - vd58 = abcd[1U]; - b58 = x + (uint32_t)24U; - u58 = load32_le(b58); - xk58 = u58; - ti58 = _t[58U]; - v58 = - vb58 - + - ((va58 + (vc58 ^ (vb58 | ~vd58)) + xk58 + ti58) - << (uint32_t)15U - | (va58 + (vc58 ^ (vb58 | ~vd58)) + xk58 + ti58) >> (uint32_t)17U); - abcd[2U] = v58; - va59 = abcd[1U]; - vb59 = abcd[2U]; - vc59 = abcd[3U]; - vd59 = abcd[0U]; - b59 = x + (uint32_t)52U; - u59 = load32_le(b59); - xk59 = u59; - ti59 = _t[59U]; - v59 = - vb59 - + - ((va59 + (vc59 ^ (vb59 | ~vd59)) + xk59 + ti59) - << (uint32_t)21U - | (va59 + (vc59 ^ (vb59 | ~vd59)) + xk59 + ti59) >> (uint32_t)11U); - abcd[1U] = v59; - va60 = abcd[0U]; - vb60 = abcd[1U]; - vc60 = abcd[2U]; - vd60 = abcd[3U]; - b60 = x + (uint32_t)16U; - u60 = load32_le(b60); - xk60 = u60; - ti60 = _t[60U]; - v60 = - vb60 - + - ((va60 + (vc60 ^ (vb60 | ~vd60)) + xk60 + ti60) - << (uint32_t)6U - | (va60 + (vc60 ^ (vb60 | ~vd60)) + xk60 + ti60) >> (uint32_t)26U); - abcd[0U] = v60; - va61 = abcd[3U]; - vb61 = abcd[0U]; - vc61 = abcd[1U]; - vd61 = abcd[2U]; - b61 = x + (uint32_t)44U; - u61 = load32_le(b61); - xk61 = u61; - ti61 = _t[61U]; - v61 = - vb61 - + - ((va61 + (vc61 ^ (vb61 | ~vd61)) + xk61 + ti61) - << (uint32_t)10U - | (va61 + (vc61 ^ (vb61 | ~vd61)) + xk61 + ti61) >> (uint32_t)22U); - abcd[3U] = v61; - va62 = abcd[2U]; - vb62 = abcd[3U]; - vc62 = abcd[0U]; - vd62 = abcd[1U]; - b62 = x + (uint32_t)8U; - u62 = load32_le(b62); - xk62 = u62; - ti62 = _t[62U]; - v62 = - vb62 - + - ((va62 + (vc62 ^ (vb62 | ~vd62)) + xk62 + ti62) - << (uint32_t)15U - | (va62 + (vc62 ^ (vb62 | ~vd62)) + xk62 + ti62) >> (uint32_t)17U); - abcd[2U] = v62; - va = abcd[1U]; - vb = abcd[2U]; - vc = abcd[3U]; - vd = abcd[0U]; - b63 = x + (uint32_t)36U; - u = load32_le(b63); - xk = u; - ti = _t[63U]; - v = - vb - + - ((va + (vc ^ (vb | ~vd)) + xk + ti) - << (uint32_t)21U - | (va + (vc ^ (vb | ~vd)) + xk + ti) >> (uint32_t)11U); - abcd[1U] = v; - a = abcd[0U]; - b = abcd[1U]; - c = abcd[2U]; - d = abcd[3U]; - abcd[0U] = a + aa; - abcd[1U] = b + bb; - abcd[2U] = c + cc; - abcd[3U] = d + dd; -} - -static void legacy_pad(uint64_t len, uint8_t *dst) -{ - uint8_t *dst1 = dst; - uint8_t *dst2; - uint8_t *dst3; - dst1[0U] = (uint8_t)0x80U; - dst2 = dst + (uint32_t)1U; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - i++) - { - dst2[i] = (uint8_t)0U; - } - } - dst3 = - dst - + - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - store64_le(dst3, len << (uint32_t)3U); -} - -void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)4U; i++) - { - store32_le(dst + i * (uint32_t)4U, s[i]); - } -} - -void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)64U; - uint8_t *block = blocks + sz * i; - Hacl_Hash_Core_MD5_legacy_update(s, block); - } -} - -void -Hacl_Hash_MD5_legacy_update_last( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)64U; - uint32_t blocks_len = blocks_n * (uint32_t)64U; - uint8_t *blocks = input; - uint32_t rest_len = input_len - blocks_len; - uint8_t *rest = input + blocks_len; - uint64_t total_input_len; - uint32_t pad_len; - uint32_t tmp_len; - 
Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n); - total_input_len = prev_len + (uint64_t)input_len; - pad_len = - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U - + (uint32_t)8U; - tmp_len = rest_len + pad_len; - { - uint8_t tmp_twoblocks[128U] = { 0U }; - uint8_t *tmp = tmp_twoblocks; - uint8_t *tmp_rest = tmp; - uint8_t *tmp_pad = tmp + rest_len; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - legacy_pad(total_input_len, tmp_pad); - Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U); - } -} - -typedef uint32_t *___uint32_t____; - -void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint32_t - scrut[4U] = - { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U }; - uint32_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)64U; - uint32_t blocks_n1; - if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n); - Hacl_Hash_MD5_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len); - Hacl_Hash_Core_MD5_legacy_finish(s, dst); - } -} -
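For reference, the pad_len expression in legacy_update_last above encodes standard Merkle-Damgård padding: one 0x80 byte, then zero bytes, then the bit length as an 8-byte little-endian word (store64_le in legacy_pad), with the total rounded up to a 64-byte block boundary. A self-contained sketch with worked checks follows; the helper name and the assert-based tests are illustrative, while the expression itself is copied from the code above.

#include <assert.h>
#include <stdint.h>

/* Mirrors: 1 + ((128 - (9 + len % 64)) % 64) + 8, from legacy_update_last above. */
static uint32_t md5_pad_len(uint64_t total_input_len)
{
  return (uint32_t)1U
    + ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)64U))) % (uint32_t)64U
    + (uint32_t)8U;
}

int main(void)
{
  /* 55 input bytes + 0x80 + 8-byte length = exactly one 64-byte block */
  assert(md5_pad_len((uint64_t)55U) == (uint32_t)9U);
  /* at 56 input bytes the length field no longer fits, so padding spills into a second block */
  assert(md5_pad_len((uint64_t)56U) == (uint32_t)72U);
  /* the padded length is always a multiple of the 64-byte block size */
  assert((123456U + md5_pad_len((uint64_t)123456U)) % 64U == 0U);
  return 0;
}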
diff --git a/dist/c89-compatible/Hacl_Hash_MD5.h b/dist/c89-compatible/Hacl_Hash_MD5.h deleted file mode 100644 index 29e8643bc9..0000000000 --- a/dist/c89-compatible/Hacl_Hash_MD5.h +++ /dev/null @@ -1,57 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Hash_MD5_H -#define __Hacl_Hash_MD5_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks); - -void -Hacl_Hash_MD5_legacy_update_last( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -); - -void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Hash_MD5_H_DEFINED -#endif
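For reference while reviewing the 64 fully unrolled steps in the deleted Hacl_Hash_MD5.c above: each step computes b' = b + rotl32(a + f(b, c, d) + X[g] + T[i], s) and then rotates the roles of a, b, c, d. The compact loop below is the standard RFC 1321 restatement of the same compression function, offered as a reading aid rather than as HACL* code; the T table is copied from the _t array above (with 0x4881d05 written out as 0x04881d05), and the helper names are illustrative.

#include <stdint.h>

/* Per-round left-rotate amounts, S[round][step % 4] (RFC 1321);
   they match the shift pairs (7,25), (12,20), ... in the unrolled code. */
static const uint32_t S[4][4] =
  { { 7U, 12U, 17U, 22U }, { 5U, 9U, 14U, 20U }, { 4U, 11U, 16U, 23U }, { 6U, 10U, 15U, 21U } };

/* Sine-derived constants, identical to the _t array in the deleted file. */
static const uint32_t T[64U] =
{
  0xd76aa478U, 0xe8c7b756U, 0x242070dbU, 0xc1bdceeeU, 0xf57c0fafU, 0x4787c62aU, 0xa8304613U, 0xfd469501U,
  0x698098d8U, 0x8b44f7afU, 0xffff5bb1U, 0x895cd7beU, 0x6b901122U, 0xfd987193U, 0xa679438eU, 0x49b40821U,
  0xf61e2562U, 0xc040b340U, 0x265e5a51U, 0xe9b6c7aaU, 0xd62f105dU, 0x02441453U, 0xd8a1e681U, 0xe7d3fbc8U,
  0x21e1cde6U, 0xc33707d6U, 0xf4d50d87U, 0x455a14edU, 0xa9e3e905U, 0xfcefa3f8U, 0x676f02d9U, 0x8d2a4c8aU,
  0xfffa3942U, 0x8771f681U, 0x6d9d6122U, 0xfde5380cU, 0xa4beea44U, 0x4bdecfa9U, 0xf6bb4b60U, 0xbebfbc70U,
  0x289b7ec6U, 0xeaa127faU, 0xd4ef3085U, 0x04881d05U, 0xd9d4d039U, 0xe6db99e5U, 0x1fa27cf8U, 0xc4ac5665U,
  0xf4292244U, 0x432aff97U, 0xab9423a7U, 0xfc93a039U, 0x655b59c3U, 0x8f0ccc92U, 0xffeff47dU, 0x85845dd1U,
  0x6fa87e4fU, 0xfe2ce6e0U, 0xa3014314U, 0x4e0811a1U, 0xf7537e82U, 0xbd3af235U, 0x2ad7d2bbU, 0xeb86d391U
};

static uint32_t rotl32(uint32_t x, uint32_t n)
{
  return x << n | x >> (32U - n);
}

/* One MD5 compression over a 64-byte block; equivalent to the 64 unrolled
   assignments in Hacl_Hash_Core_MD5_legacy_update. */
static void md5_compress_sketch(uint32_t abcd[4], const uint8_t block[64])
{
  uint32_t X[16];
  uint32_t a = abcd[0], b = abcd[1], c = abcd[2], d = abcd[3];
  uint32_t i;
  for (i = 0U; i < 16U; i++) /* little-endian message load, as load32_le above */
  {
    X[i] = (uint32_t)block[4U * i]
      | (uint32_t)block[4U * i + 1U] << 8
      | (uint32_t)block[4U * i + 2U] << 16
      | (uint32_t)block[4U * i + 3U] << 24;
  }
  for (i = 0U; i < 64U; i++)
  {
    uint32_t f, g, tmp;
    if (i < 16U)      { f = (b & c) | (~b & d); g = i; }                   /* round 1: F */
    else if (i < 32U) { f = (b & d) | (c & ~d); g = (5U * i + 1U) % 16U; } /* round 2: G */
    else if (i < 48U) { f = b ^ (c ^ d);        g = (3U * i + 5U) % 16U; } /* round 3: H */
    else              { f = c ^ (b | ~d);       g = (7U * i) % 16U; }      /* round 4: I */
    tmp = d;
    d = c;
    c = b;
    b = b + rotl32(a + f + X[g] + T[i], S[i / 16U][i % 4U]);
    a = tmp;
  }
  /* feed-forward, matching the final aa/bb/cc/dd additions above */
  abcd[0] += a; abcd[1] += b; abcd[2] += c; abcd[3] += d;
}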
diff --git a/dist/c89-compatible/Hacl_Hash_SHA1.c b/dist/c89-compatible/Hacl_Hash_SHA1.c deleted file mode 100644 index 573ad87a39..0000000000 --- a/dist/c89-compatible/Hacl_Hash_SHA1.c +++ /dev/null @@ -1,275 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "internal/Hacl_Hash_SHA1.h" - - - -static uint32_t -_h0[5U] = - { - (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U, - (uint32_t)0xc3d2e1f0U - }; - -void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)5U; i++) - { - s[i] = _h0[i]; - } -} - -void Hacl_Hash_Core_SHA1_legacy_update(uint32_t *h, uint8_t *l) -{ - uint32_t ha = h[0U]; - uint32_t hb = h[1U]; - uint32_t hc = h[2U]; - uint32_t hd = h[3U]; - uint32_t he = h[4U]; - uint32_t _w[80U] = { 0U }; - uint32_t sta; - uint32_t stb; - uint32_t stc; - uint32_t std; - uint32_t ste; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)80U; i++) - { - uint32_t v; - if (i < (uint32_t)16U) - { - uint8_t *b = l + i * (uint32_t)4U; - uint32_t u = load32_be(b); - v = u; - } - else - { - uint32_t wmit3 = _w[i - (uint32_t)3U]; - uint32_t wmit8 = _w[i - (uint32_t)8U]; - uint32_t wmit14 = _w[i - (uint32_t)14U]; - uint32_t wmit16 = _w[i - (uint32_t)16U]; - v = - (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) - << (uint32_t)1U - | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U; - } - _w[i] = v; - } - } - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)80U; i++) - { - uint32_t _a = h[0U]; - uint32_t _b = h[1U]; - uint32_t _c = h[2U]; - uint32_t _d = h[3U]; - uint32_t _e = h[4U]; - uint32_t wmit = _w[i]; - uint32_t ite0; - if (i < (uint32_t)20U) - { - ite0 = (_b & _c) ^ (~_b & _d); - } - else if ((uint32_t)39U < i && i < (uint32_t)60U) - { - ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d)); - } - else - { - ite0 = _b ^ (_c ^ _d); - } - { - uint32_t ite; - if (i < (uint32_t)20U) - { - ite = (uint32_t)0x5a827999U; - } - else if (i < (uint32_t)40U) - { - ite = (uint32_t)0x6ed9eba1U; - } - else if (i < (uint32_t)60U) - { - ite = (uint32_t)0x8f1bbcdcU; - } - else - { - ite = (uint32_t)0xca62c1d6U; - } - { - uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit; - h[0U] = _T; - h[1U] = _a; - h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U; - h[3U] = _c; - h[4U] = _d; - } - } - } - } - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)80U; i++) - { - _w[i] = (uint32_t)0U; - } - } - sta = h[0U]; - stb = h[1U]; - stc = h[2U]; - std = h[3U]; - ste = h[4U]; - h[0U] = sta + ha; - h[1U] = stb + hb; - h[2U] = stc + hc; - h[3U] = std + hd; - h[4U] = ste + he; -} - -static void legacy_pad(uint64_t len, uint8_t *dst) -{ - uint8_t *dst1 = dst; - uint8_t *dst2; - uint8_t *dst3; - dst1[0U] = (uint8_t)0x80U; - dst2 = dst + (uint32_t)1U; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - i++) - { - dst2[i] = (uint8_t)0U; - } - } - dst3 = - dst - + - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - store64_be(dst3, len << (uint32_t)3U); -} - -void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)5U; i++) - { - store32_be(dst + i * (uint32_t)4U, s[i]); - } -} - -void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)64U; - uint8_t *block = blocks + sz * i; - Hacl_Hash_Core_SHA1_legacy_update(s, block); - } -} - -void -Hacl_Hash_SHA1_legacy_update_last( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / 
(uint32_t)64U; - uint32_t blocks_len = blocks_n * (uint32_t)64U; - uint8_t *blocks = input; - uint32_t rest_len = input_len - blocks_len; - uint8_t *rest = input + blocks_len; - uint64_t total_input_len; - uint32_t pad_len; - uint32_t tmp_len; - Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n); - total_input_len = prev_len + (uint64_t)input_len; - pad_len = - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U - + (uint32_t)8U; - tmp_len = rest_len + pad_len; - { - uint8_t tmp_twoblocks[128U] = { 0U }; - uint8_t *tmp = tmp_twoblocks; - uint8_t *tmp_rest = tmp; - uint8_t *tmp_pad = tmp + rest_len; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - legacy_pad(total_input_len, tmp_pad); - Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U); - } -} - -void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint32_t - scrut[5U] = - { - (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U, - (uint32_t)0xc3d2e1f0U - }; - uint32_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)64U; - uint32_t blocks_n1; - if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n); - Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len); - Hacl_Hash_Core_SHA1_legacy_finish(s, dst); - } -} - diff --git a/dist/c89-compatible/Hacl_Hash_SHA1.h b/dist/c89-compatible/Hacl_Hash_SHA1.h deleted file mode 100644 index eb6c8566be..0000000000 --- a/dist/c89-compatible/Hacl_Hash_SHA1.h +++ /dev/null @@ -1,57 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Hash_SHA1_H -#define __Hacl_Hash_SHA1_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks); - -void -Hacl_Hash_SHA1_legacy_update_last( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -); - -void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Hash_SHA1_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Hash_SHA2.c b/dist/c89-compatible/Hacl_Hash_SHA2.c deleted file mode 100644 index ffb29d672a..0000000000 --- a/dist/c89-compatible/Hacl_Hash_SHA2.c +++ /dev/null @@ -1,1007 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "internal/Hacl_Hash_SHA2.h" - - - -static uint32_t -h224[8U] = - { - (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U, - (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U - }; - -static uint32_t -h256[8U] = - { - (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU, - (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U - }; - -static uint64_t -h384[8U] = - { - (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U, - (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U, - (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U - }; - -static uint64_t -h512[8U] = - { - (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU, - (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU, - (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U - }; - -static uint32_t -k224_256[64U] = - { - (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U, - (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U, - (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U, - (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U, - (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU, - (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU, - (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U, - (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U, - (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U, - (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U, - (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U, - (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U, - (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U, - (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U, - (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U, - (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U - }; - -static uint64_t -k384_512[80U] = - { - (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU, - (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U, - (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U, - (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U, - (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U, - (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U, - (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U, - (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U, - (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU, - 
(uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U, - (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU, - (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU, - (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U, - (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U, - (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U, - (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U, - (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U, - (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU, - (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU, - (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU, - (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U, - (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U, - (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU, - (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU, - (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU, - (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU, - (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U - }; - -void Hacl_Hash_Core_SHA2_init_224(uint32_t *s) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - s[i] = h224[i]; - } -} - -void Hacl_Hash_Core_SHA2_init_256(uint32_t *s) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - s[i] = h256[i]; - } -} - -void Hacl_Hash_Core_SHA2_init_384(uint64_t *s) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - s[i] = h384[i]; - } -} - -void Hacl_Hash_Core_SHA2_init_512(uint64_t *s) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - s[i] = h512[i]; - } -} - -static void update_224(uint32_t *hash, uint8_t *block) -{ - uint32_t hash1[8U] = { 0U }; - uint32_t computed_ws[64U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - if (i < (uint32_t)16U) - { - uint8_t *b = block + i * (uint32_t)4U; - uint32_t u = load32_be(b); - computed_ws[i] = u; - } - else - { - uint32_t t16 = computed_ws[i - (uint32_t)16U]; - uint32_t t15 = computed_ws[i - (uint32_t)15U]; - uint32_t t7 = computed_ws[i - (uint32_t)7U]; - uint32_t t2 = computed_ws[i - (uint32_t)2U]; - uint32_t - s1 = - (t2 >> (uint32_t)17U | t2 << (uint32_t)15U) - ^ ((t2 >> (uint32_t)19U | t2 << (uint32_t)13U) ^ t2 >> (uint32_t)10U); - uint32_t - s0 = - (t15 >> (uint32_t)7U | t15 << (uint32_t)25U) - ^ ((t15 >> (uint32_t)18U | t15 << (uint32_t)14U) ^ t15 >> (uint32_t)3U); - uint32_t w = s1 + t7 + s0 + t16; - computed_ws[i] = w; - } - } - } - memcpy(hash1, hash, (uint32_t)8U * sizeof (uint32_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint32_t a0 = hash1[0U]; - uint32_t b0 = hash1[1U]; - uint32_t c0 = hash1[2U]; - uint32_t d0 = hash1[3U]; - uint32_t e0 = hash1[4U]; - uint32_t f0 = hash1[5U]; - uint32_t g0 = hash1[6U]; - uint32_t h02 = hash1[7U]; - uint32_t w = computed_ws[i]; - uint32_t - t1 = - h02 - + - ((e0 >> 
(uint32_t)6U | e0 << (uint32_t)26U) - ^ - ((e0 >> (uint32_t)11U | e0 << (uint32_t)21U) - ^ (e0 >> (uint32_t)25U | e0 << (uint32_t)7U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k224_256[i] - + w; - uint32_t - t2 = - ((a0 >> (uint32_t)2U | a0 << (uint32_t)30U) - ^ - ((a0 >> (uint32_t)13U | a0 << (uint32_t)19U) - ^ (a0 >> (uint32_t)22U | a0 << (uint32_t)10U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - hash1[0U] = t1 + t2; - hash1[1U] = a0; - hash1[2U] = b0; - hash1[3U] = c0; - hash1[4U] = d0 + t1; - hash1[5U] = e0; - hash1[6U] = f0; - hash1[7U] = g0; - } - } - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t xi = hash[i]; - uint32_t yi = hash1[i]; - hash[i] = xi + yi;); -} - -static void update_256(uint32_t *hash, uint8_t *block) -{ - uint32_t hash1[8U] = { 0U }; - uint32_t computed_ws[64U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - if (i < (uint32_t)16U) - { - uint8_t *b = block + i * (uint32_t)4U; - uint32_t u = load32_be(b); - computed_ws[i] = u; - } - else - { - uint32_t t16 = computed_ws[i - (uint32_t)16U]; - uint32_t t15 = computed_ws[i - (uint32_t)15U]; - uint32_t t7 = computed_ws[i - (uint32_t)7U]; - uint32_t t2 = computed_ws[i - (uint32_t)2U]; - uint32_t - s1 = - (t2 >> (uint32_t)17U | t2 << (uint32_t)15U) - ^ ((t2 >> (uint32_t)19U | t2 << (uint32_t)13U) ^ t2 >> (uint32_t)10U); - uint32_t - s0 = - (t15 >> (uint32_t)7U | t15 << (uint32_t)25U) - ^ ((t15 >> (uint32_t)18U | t15 << (uint32_t)14U) ^ t15 >> (uint32_t)3U); - uint32_t w = s1 + t7 + s0 + t16; - computed_ws[i] = w; - } - } - } - memcpy(hash1, hash, (uint32_t)8U * sizeof (uint32_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - uint32_t a0 = hash1[0U]; - uint32_t b0 = hash1[1U]; - uint32_t c0 = hash1[2U]; - uint32_t d0 = hash1[3U]; - uint32_t e0 = hash1[4U]; - uint32_t f0 = hash1[5U]; - uint32_t g0 = hash1[6U]; - uint32_t h02 = hash1[7U]; - uint32_t w = computed_ws[i]; - uint32_t - t1 = - h02 - + - ((e0 >> (uint32_t)6U | e0 << (uint32_t)26U) - ^ - ((e0 >> (uint32_t)11U | e0 << (uint32_t)21U) - ^ (e0 >> (uint32_t)25U | e0 << (uint32_t)7U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k224_256[i] - + w; - uint32_t - t2 = - ((a0 >> (uint32_t)2U | a0 << (uint32_t)30U) - ^ - ((a0 >> (uint32_t)13U | a0 << (uint32_t)19U) - ^ (a0 >> (uint32_t)22U | a0 << (uint32_t)10U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - hash1[0U] = t1 + t2; - hash1[1U] = a0; - hash1[2U] = b0; - hash1[3U] = c0; - hash1[4U] = d0 + t1; - hash1[5U] = e0; - hash1[6U] = f0; - hash1[7U] = g0; - } - } - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t xi = hash[i]; - uint32_t yi = hash1[i]; - hash[i] = xi + yi;); -} - -void Hacl_Hash_Core_SHA2_update_384(uint64_t *hash, uint8_t *block) -{ - uint64_t hash1[8U] = { 0U }; - uint64_t computed_ws[80U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)80U; i++) - { - if (i < (uint32_t)16U) - { - uint8_t *b = block + i * (uint32_t)8U; - uint64_t u = load64_be(b); - computed_ws[i] = u; - } - else - { - uint64_t t16 = computed_ws[i - (uint32_t)16U]; - uint64_t t15 = computed_ws[i - (uint32_t)15U]; - uint64_t t7 = computed_ws[i - (uint32_t)7U]; - uint64_t t2 = computed_ws[i - (uint32_t)2U]; - uint64_t - s1 = - (t2 >> (uint32_t)19U | t2 << (uint32_t)45U) - ^ ((t2 >> (uint32_t)61U | t2 << (uint32_t)3U) ^ t2 >> (uint32_t)6U); - uint64_t - s0 = - (t15 >> (uint32_t)1U | t15 << (uint32_t)63U) - ^ ((t15 >> (uint32_t)8U | t15 << (uint32_t)56U) ^ t15 >> (uint32_t)7U); - uint64_t w = s1 + t7 + s0 + t16; - 
computed_ws[i] = w; - } - } - } - memcpy(hash1, hash, (uint32_t)8U * sizeof (uint64_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)80U; i++) - { - uint64_t a0 = hash1[0U]; - uint64_t b0 = hash1[1U]; - uint64_t c0 = hash1[2U]; - uint64_t d0 = hash1[3U]; - uint64_t e0 = hash1[4U]; - uint64_t f0 = hash1[5U]; - uint64_t g0 = hash1[6U]; - uint64_t h02 = hash1[7U]; - uint64_t w = computed_ws[i]; - uint64_t - t1 = - h02 - + - ((e0 >> (uint32_t)14U | e0 << (uint32_t)50U) - ^ - ((e0 >> (uint32_t)18U | e0 << (uint32_t)46U) - ^ (e0 >> (uint32_t)41U | e0 << (uint32_t)23U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k384_512[i] - + w; - uint64_t - t2 = - ((a0 >> (uint32_t)28U | a0 << (uint32_t)36U) - ^ - ((a0 >> (uint32_t)34U | a0 << (uint32_t)30U) - ^ (a0 >> (uint32_t)39U | a0 << (uint32_t)25U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - hash1[0U] = t1 + t2; - hash1[1U] = a0; - hash1[2U] = b0; - hash1[3U] = c0; - hash1[4U] = d0 + t1; - hash1[5U] = e0; - hash1[6U] = f0; - hash1[7U] = g0; - } - } - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t xi = hash[i]; - uint64_t yi = hash1[i]; - hash[i] = xi + yi;); -} - -void Hacl_Hash_Core_SHA2_update_512(uint64_t *hash, uint8_t *block) -{ - uint64_t hash1[8U] = { 0U }; - uint64_t computed_ws[80U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)80U; i++) - { - if (i < (uint32_t)16U) - { - uint8_t *b = block + i * (uint32_t)8U; - uint64_t u = load64_be(b); - computed_ws[i] = u; - } - else - { - uint64_t t16 = computed_ws[i - (uint32_t)16U]; - uint64_t t15 = computed_ws[i - (uint32_t)15U]; - uint64_t t7 = computed_ws[i - (uint32_t)7U]; - uint64_t t2 = computed_ws[i - (uint32_t)2U]; - uint64_t - s1 = - (t2 >> (uint32_t)19U | t2 << (uint32_t)45U) - ^ ((t2 >> (uint32_t)61U | t2 << (uint32_t)3U) ^ t2 >> (uint32_t)6U); - uint64_t - s0 = - (t15 >> (uint32_t)1U | t15 << (uint32_t)63U) - ^ ((t15 >> (uint32_t)8U | t15 << (uint32_t)56U) ^ t15 >> (uint32_t)7U); - uint64_t w = s1 + t7 + s0 + t16; - computed_ws[i] = w; - } - } - } - memcpy(hash1, hash, (uint32_t)8U * sizeof (uint64_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)80U; i++) - { - uint64_t a0 = hash1[0U]; - uint64_t b0 = hash1[1U]; - uint64_t c0 = hash1[2U]; - uint64_t d0 = hash1[3U]; - uint64_t e0 = hash1[4U]; - uint64_t f0 = hash1[5U]; - uint64_t g0 = hash1[6U]; - uint64_t h02 = hash1[7U]; - uint64_t w = computed_ws[i]; - uint64_t - t1 = - h02 - + - ((e0 >> (uint32_t)14U | e0 << (uint32_t)50U) - ^ - ((e0 >> (uint32_t)18U | e0 << (uint32_t)46U) - ^ (e0 >> (uint32_t)41U | e0 << (uint32_t)23U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k384_512[i] - + w; - uint64_t - t2 = - ((a0 >> (uint32_t)28U | a0 << (uint32_t)36U) - ^ - ((a0 >> (uint32_t)34U | a0 << (uint32_t)30U) - ^ (a0 >> (uint32_t)39U | a0 << (uint32_t)25U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - hash1[0U] = t1 + t2; - hash1[1U] = a0; - hash1[2U] = b0; - hash1[3U] = c0; - hash1[4U] = d0 + t1; - hash1[5U] = e0; - hash1[6U] = f0; - hash1[7U] = g0; - } - } - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t xi = hash[i]; - uint64_t yi = hash1[i]; - hash[i] = xi + yi;); -} - -static void pad_224(uint64_t len, uint8_t *dst) -{ - uint8_t *dst1 = dst; - uint8_t *dst2; - uint8_t *dst3; - dst1[0U] = (uint8_t)0x80U; - dst2 = dst + (uint32_t)1U; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - i++) - { - dst2[i] = (uint8_t)0U; - } - } - dst3 = - dst - + - 
(uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - store64_be(dst3, len << (uint32_t)3U); -} - -void Hacl_Hash_Core_SHA2_pad_256(uint64_t len, uint8_t *dst) -{ - uint8_t *dst1 = dst; - uint8_t *dst2; - uint8_t *dst3; - dst1[0U] = (uint8_t)0x80U; - dst2 = dst + (uint32_t)1U; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - i++) - { - dst2[i] = (uint8_t)0U; - } - } - dst3 = - dst - + - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U; - store64_be(dst3, len << (uint32_t)3U); -} - -static void pad_384(FStar_UInt128_uint128 len, uint8_t *dst) -{ - uint8_t *dst1 = dst; - uint8_t *dst2; - uint8_t *dst3; - FStar_UInt128_uint128 len_; - dst1[0U] = (uint8_t)0x80U; - dst2 = dst + (uint32_t)1U; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < - ((uint32_t)256U - - - ((uint32_t)17U - + (uint32_t)(FStar_UInt128_uint128_to_uint64(len) % (uint64_t)(uint32_t)128U))) - % (uint32_t)128U; - i++) - { - dst2[i] = (uint8_t)0U; - } - } - dst3 = - dst - + - (uint32_t)1U - + - ((uint32_t)256U - - - ((uint32_t)17U - + (uint32_t)(FStar_UInt128_uint128_to_uint64(len) % (uint64_t)(uint32_t)128U))) - % (uint32_t)128U; - len_ = FStar_UInt128_shift_left(len, (uint32_t)3U); - store128_be(dst3, len_); -} - -static void pad_512(FStar_UInt128_uint128 len, uint8_t *dst) -{ - uint8_t *dst1 = dst; - uint8_t *dst2; - uint8_t *dst3; - FStar_UInt128_uint128 len_; - dst1[0U] = (uint8_t)0x80U; - dst2 = dst + (uint32_t)1U; - { - uint32_t i; - for - (i - = (uint32_t)0U; - i - < - ((uint32_t)256U - - - ((uint32_t)17U - + (uint32_t)(FStar_UInt128_uint128_to_uint64(len) % (uint64_t)(uint32_t)128U))) - % (uint32_t)128U; - i++) - { - dst2[i] = (uint8_t)0U; - } - } - dst3 = - dst - + - (uint32_t)1U - + - ((uint32_t)256U - - - ((uint32_t)17U - + (uint32_t)(FStar_UInt128_uint128_to_uint64(len) % (uint64_t)(uint32_t)128U))) - % (uint32_t)128U; - len_ = FStar_UInt128_shift_left(len, (uint32_t)3U); - store128_be(dst3, len_); -} - -void Hacl_Hash_Core_SHA2_finish_224(uint32_t *s, uint8_t *dst) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)7U; i++) - { - store32_be(dst + i * (uint32_t)4U, s[i]); - } -} - -void Hacl_Hash_Core_SHA2_finish_256(uint32_t *s, uint8_t *dst) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - store32_be(dst + i * (uint32_t)4U, s[i]); - } -} - -void Hacl_Hash_Core_SHA2_finish_384(uint64_t *s, uint8_t *dst) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)6U; i++) - { - store64_be(dst + i * (uint32_t)8U, s[i]); - } -} - -void Hacl_Hash_Core_SHA2_finish_512(uint64_t *s, uint8_t *dst) -{ - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)8U; i++) - { - store64_be(dst + i * (uint32_t)8U, s[i]); - } -} - -void Hacl_Hash_SHA2_update_multi_224(uint32_t *s, uint8_t *blocks, uint32_t n_blocks) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)64U; - uint8_t *block = blocks + sz * i; - update_224(s, block); - } -} - -void Hacl_Hash_SHA2_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n_blocks) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)64U; - uint8_t *block = blocks + sz * i; - update_256(s, block); - } -} - -void Hacl_Hash_SHA2_update_multi_384(uint64_t *s, uint8_t *blocks, uint32_t n_blocks) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - 
uint32_t sz = (uint32_t)128U; - uint8_t *block = blocks + sz * i; - Hacl_Hash_Core_SHA2_update_384(s, block); - } -} - -void Hacl_Hash_SHA2_update_multi_512(uint64_t *s, uint8_t *blocks, uint32_t n_blocks) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n_blocks; i++) - { - uint32_t sz = (uint32_t)128U; - uint8_t *block = blocks + sz * i; - Hacl_Hash_Core_SHA2_update_512(s, block); - } -} - -void -Hacl_Hash_SHA2_update_last_224( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)64U; - uint32_t blocks_len = blocks_n * (uint32_t)64U; - uint8_t *blocks = input; - uint32_t rest_len = input_len - blocks_len; - uint8_t *rest = input + blocks_len; - uint64_t total_input_len; - uint32_t pad_len; - uint32_t tmp_len; - Hacl_Hash_SHA2_update_multi_224(s, blocks, blocks_n); - total_input_len = prev_len + (uint64_t)input_len; - pad_len = - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U - + (uint32_t)8U; - tmp_len = rest_len + pad_len; - { - uint8_t tmp_twoblocks[128U] = { 0U }; - uint8_t *tmp = tmp_twoblocks; - uint8_t *tmp_rest = tmp; - uint8_t *tmp_pad = tmp + rest_len; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - pad_224(total_input_len, tmp_pad); - Hacl_Hash_SHA2_update_multi_224(s, tmp, tmp_len / (uint32_t)64U); - } -} - -void -Hacl_Hash_SHA2_update_last_256( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)64U; - uint32_t blocks_len = blocks_n * (uint32_t)64U; - uint8_t *blocks = input; - uint32_t rest_len = input_len - blocks_len; - uint8_t *rest = input + blocks_len; - uint64_t total_input_len; - uint32_t pad_len; - uint32_t tmp_len; - Hacl_Hash_SHA2_update_multi_256(s, blocks, blocks_n); - total_input_len = prev_len + (uint64_t)input_len; - pad_len = - (uint32_t)1U - + - ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U))) - % (uint32_t)64U - + (uint32_t)8U; - tmp_len = rest_len + pad_len; - { - uint8_t tmp_twoblocks[128U] = { 0U }; - uint8_t *tmp = tmp_twoblocks; - uint8_t *tmp_rest = tmp; - uint8_t *tmp_pad = tmp + rest_len; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - Hacl_Hash_Core_SHA2_pad_256(total_input_len, tmp_pad); - Hacl_Hash_SHA2_update_multi_256(s, tmp, tmp_len / (uint32_t)64U); - } -} - -void -Hacl_Hash_SHA2_update_last_384( - uint64_t *s, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)128U; - uint32_t blocks_len = blocks_n * (uint32_t)128U; - uint8_t *blocks = input; - uint32_t rest_len = input_len - blocks_len; - uint8_t *rest = input + blocks_len; - FStar_UInt128_uint128 total_input_len; - uint32_t pad_len; - uint32_t tmp_len; - Hacl_Hash_SHA2_update_multi_384(s, blocks, blocks_n); - total_input_len = - FStar_UInt128_add(prev_len, - FStar_UInt128_uint64_to_uint128((uint64_t)input_len)); - pad_len = - (uint32_t)1U - + - ((uint32_t)256U - - - ((uint32_t)17U - + (uint32_t)(FStar_UInt128_uint128_to_uint64(total_input_len) % (uint64_t)(uint32_t)128U))) - % (uint32_t)128U - + (uint32_t)16U; - tmp_len = rest_len + pad_len; - { - uint8_t tmp_twoblocks[256U] = { 0U }; - uint8_t *tmp = tmp_twoblocks; - uint8_t *tmp_rest = tmp; - uint8_t *tmp_pad = tmp + rest_len; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - pad_384(total_input_len, tmp_pad); - Hacl_Hash_SHA2_update_multi_384(s, tmp, tmp_len / (uint32_t)128U); - } -} 
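All of the update_last_* functions deleted here finalize a Merkle-Damgard hash the same way: absorb the whole blocks, then copy the leftover bytes into a one- or two-block scratch buffer together with the padding, i.e. a 0x80 byte, enough zeros to reach the final length field, and the big-endian bit length (8 bytes for the 64-byte-block variants, 16 bytes for the 128-byte-block ones). The following is a minimal standalone sketch of that pad_len arithmetic for the 64-byte-block case; sha2_small_pad_len is a hypothetical name for illustration, not an identifier from this distribution. It checks that the padded tail always fills exactly one or two blocks, which is why the 128-byte tmp_twoblocks buffer above suffices.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: mirrors the pad_len expression in the update_last_224/256
 * functions deleted above (hypothetical helper, not part of HACL*). */
static uint32_t sha2_small_pad_len(uint64_t total_input_len)
{
  /* one 0x80 byte + zero padding + an 8-byte big-endian bit length */
  return (uint32_t)1U
    + ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)64U))) % (uint32_t)64U
    + (uint32_t)8U;
}

int main(void)
{
  uint64_t len;
  for (len = (uint64_t)0U; len < (uint64_t)1024U; len++)
  {
    uint32_t rest_len = (uint32_t)(len % (uint64_t)64U);
    uint32_t tmp_len = rest_len + sha2_small_pad_len(len);
    /* The padded tail is block-aligned and never exceeds two blocks. */
    if (tmp_len % (uint32_t)64U != 0U || tmp_len > (uint32_t)128U)
    {
      printf("bad pad length at %lu\n", (unsigned long)len);
      return 1;
    }
  }
  return 0;
}

The 128-byte-block variants (SHA-384/512, below) use the same shape with 256, 17 and 128 in place of 128, 9 and 64, plus a 16-byte length field, hence their 256-byte scratch buffer.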
- -void -Hacl_Hash_SHA2_update_last_512( - uint64_t *s, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -) -{ - uint32_t blocks_n = input_len / (uint32_t)128U; - uint32_t blocks_len = blocks_n * (uint32_t)128U; - uint8_t *blocks = input; - uint32_t rest_len = input_len - blocks_len; - uint8_t *rest = input + blocks_len; - FStar_UInt128_uint128 total_input_len; - uint32_t pad_len; - uint32_t tmp_len; - Hacl_Hash_SHA2_update_multi_512(s, blocks, blocks_n); - total_input_len = - FStar_UInt128_add(prev_len, - FStar_UInt128_uint64_to_uint128((uint64_t)input_len)); - pad_len = - (uint32_t)1U - + - ((uint32_t)256U - - - ((uint32_t)17U - + (uint32_t)(FStar_UInt128_uint128_to_uint64(total_input_len) % (uint64_t)(uint32_t)128U))) - % (uint32_t)128U - + (uint32_t)16U; - tmp_len = rest_len + pad_len; - { - uint8_t tmp_twoblocks[256U] = { 0U }; - uint8_t *tmp = tmp_twoblocks; - uint8_t *tmp_rest = tmp; - uint8_t *tmp_pad = tmp + rest_len; - memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t)); - pad_512(total_input_len, tmp_pad); - Hacl_Hash_SHA2_update_multi_512(s, tmp, tmp_len / (uint32_t)128U); - } -} - -void Hacl_Hash_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint32_t - scrut[8U] = - { - (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U, - (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U - }; - uint32_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)64U; - uint32_t blocks_n1; - if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - Hacl_Hash_SHA2_update_multi_224(s, blocks, blocks_n); - Hacl_Hash_SHA2_update_last_224(s, (uint64_t)blocks_len, rest, rest_len); - Hacl_Hash_Core_SHA2_finish_224(s, dst); - } -} - -void Hacl_Hash_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint32_t - scrut[8U] = - { - (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU, - (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U - }; - uint32_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)64U; - uint32_t blocks_n1; - if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - Hacl_Hash_SHA2_update_multi_256(s, blocks, blocks_n); - Hacl_Hash_SHA2_update_last_256(s, (uint64_t)blocks_len, rest, rest_len); - Hacl_Hash_Core_SHA2_finish_256(s, dst); - } -} - -typedef uint64_t *___uint64_t____; - -void Hacl_Hash_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint64_t - scrut[8U] = - { - (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U, - 
(uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U, - (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U - }; - uint64_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)128U; - uint32_t blocks_n1; - if (input_len % (uint32_t)128U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)128U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - Hacl_Hash_SHA2_update_multi_384(s, blocks, blocks_n); - Hacl_Hash_SHA2_update_last_384(s, - FStar_UInt128_uint64_to_uint128((uint64_t)blocks_len), - rest, - rest_len); - Hacl_Hash_Core_SHA2_finish_384(s, dst); - } -} - -void Hacl_Hash_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst) -{ - uint64_t - scrut[8U] = - { - (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU, - (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU, - (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U - }; - uint64_t *s = scrut; - uint32_t blocks_n0 = input_len / (uint32_t)128U; - uint32_t blocks_n1; - if (input_len % (uint32_t)128U == (uint32_t)0U && blocks_n0 > (uint32_t)0U) - { - blocks_n1 = blocks_n0 - (uint32_t)1U; - } - else - { - blocks_n1 = blocks_n0; - } - { - uint32_t blocks_len0 = blocks_n1 * (uint32_t)128U; - uint8_t *blocks0 = input; - uint32_t rest_len0 = input_len - blocks_len0; - uint8_t *rest0 = input + blocks_len0; - uint32_t blocks_n = blocks_n1; - uint32_t blocks_len = blocks_len0; - uint8_t *blocks = blocks0; - uint32_t rest_len = rest_len0; - uint8_t *rest = rest0; - Hacl_Hash_SHA2_update_multi_512(s, blocks, blocks_n); - Hacl_Hash_SHA2_update_last_512(s, - FStar_UInt128_uint64_to_uint128((uint64_t)blocks_len), - rest, - rest_len); - Hacl_Hash_Core_SHA2_finish_512(s, dst); - } -} - diff --git a/dist/c89-compatible/Hacl_Hash_SHA2.h b/dist/c89-compatible/Hacl_Hash_SHA2.h deleted file mode 100644 index 530f1116d3..0000000000 --- a/dist/c89-compatible/Hacl_Hash_SHA2.h +++ /dev/null @@ -1,93 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Hash_SHA2_H -#define __Hacl_Hash_SHA2_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void Hacl_Hash_SHA2_update_multi_224(uint32_t *s, uint8_t *blocks, uint32_t n_blocks); - -void Hacl_Hash_SHA2_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n_blocks); - -void Hacl_Hash_SHA2_update_multi_384(uint64_t *s, uint8_t *blocks, uint32_t n_blocks); - -void Hacl_Hash_SHA2_update_multi_512(uint64_t *s, uint8_t *blocks, uint32_t n_blocks); - -void -Hacl_Hash_SHA2_update_last_224( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -); - -void -Hacl_Hash_SHA2_update_last_256( - uint32_t *s, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -); - -void -Hacl_Hash_SHA2_update_last_384( - uint64_t *s, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -); - -void -Hacl_Hash_SHA2_update_last_512( - uint64_t *s, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -); - -void Hacl_Hash_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst); - -void Hacl_Hash_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst); - -void Hacl_Hash_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst); - -void Hacl_Hash_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Hash_SHA2_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Impl_Blake2_Constants.h b/dist/c89-compatible/Hacl_Impl_Blake2_Constants.h deleted file mode 100644 index 596c74bb10..0000000000 --- a/dist/c89-compatible/Hacl_Impl_Blake2_Constants.h +++ /dev/null @@ -1,95 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#ifndef __Hacl_Impl_Blake2_Constants_H -#define __Hacl_Impl_Blake2_Constants_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -static const -uint32_t -Hacl_Impl_Blake2_Constants_sigmaTable[160U] = - { - (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U, (uint32_t)4U, (uint32_t)5U, - (uint32_t)6U, (uint32_t)7U, (uint32_t)8U, (uint32_t)9U, (uint32_t)10U, (uint32_t)11U, - (uint32_t)12U, (uint32_t)13U, (uint32_t)14U, (uint32_t)15U, (uint32_t)14U, (uint32_t)10U, - (uint32_t)4U, (uint32_t)8U, (uint32_t)9U, (uint32_t)15U, (uint32_t)13U, (uint32_t)6U, - (uint32_t)1U, (uint32_t)12U, (uint32_t)0U, (uint32_t)2U, (uint32_t)11U, (uint32_t)7U, - (uint32_t)5U, (uint32_t)3U, (uint32_t)11U, (uint32_t)8U, (uint32_t)12U, (uint32_t)0U, - (uint32_t)5U, (uint32_t)2U, (uint32_t)15U, (uint32_t)13U, (uint32_t)10U, (uint32_t)14U, - (uint32_t)3U, (uint32_t)6U, (uint32_t)7U, (uint32_t)1U, (uint32_t)9U, (uint32_t)4U, - (uint32_t)7U, (uint32_t)9U, (uint32_t)3U, (uint32_t)1U, (uint32_t)13U, (uint32_t)12U, - (uint32_t)11U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U, (uint32_t)5U, (uint32_t)10U, - (uint32_t)4U, (uint32_t)0U, (uint32_t)15U, (uint32_t)8U, (uint32_t)9U, (uint32_t)0U, - (uint32_t)5U, (uint32_t)7U, (uint32_t)2U, (uint32_t)4U, (uint32_t)10U, (uint32_t)15U, - (uint32_t)14U, (uint32_t)1U, (uint32_t)11U, (uint32_t)12U, (uint32_t)6U, (uint32_t)8U, - (uint32_t)3U, (uint32_t)13U, (uint32_t)2U, (uint32_t)12U, (uint32_t)6U, (uint32_t)10U, - (uint32_t)0U, (uint32_t)11U, (uint32_t)8U, (uint32_t)3U, (uint32_t)4U, (uint32_t)13U, - (uint32_t)7U, (uint32_t)5U, (uint32_t)15U, (uint32_t)14U, (uint32_t)1U, (uint32_t)9U, - (uint32_t)12U, (uint32_t)5U, (uint32_t)1U, (uint32_t)15U, (uint32_t)14U, (uint32_t)13U, - (uint32_t)4U, (uint32_t)10U, (uint32_t)0U, (uint32_t)7U, (uint32_t)6U, (uint32_t)3U, - (uint32_t)9U, (uint32_t)2U, (uint32_t)8U, (uint32_t)11U, (uint32_t)13U, (uint32_t)11U, - (uint32_t)7U, (uint32_t)14U, (uint32_t)12U, (uint32_t)1U, (uint32_t)3U, (uint32_t)9U, - (uint32_t)5U, (uint32_t)0U, (uint32_t)15U, (uint32_t)4U, (uint32_t)8U, (uint32_t)6U, - (uint32_t)2U, (uint32_t)10U, (uint32_t)6U, (uint32_t)15U, (uint32_t)14U, (uint32_t)9U, - (uint32_t)11U, (uint32_t)3U, (uint32_t)0U, (uint32_t)8U, (uint32_t)12U, (uint32_t)2U, - (uint32_t)13U, (uint32_t)7U, (uint32_t)1U, (uint32_t)4U, (uint32_t)10U, (uint32_t)5U, - (uint32_t)10U, (uint32_t)2U, (uint32_t)8U, (uint32_t)4U, (uint32_t)7U, (uint32_t)6U, - (uint32_t)1U, (uint32_t)5U, (uint32_t)15U, (uint32_t)11U, (uint32_t)9U, (uint32_t)14U, - (uint32_t)3U, (uint32_t)12U, (uint32_t)13U - }; - -static const -uint32_t -Hacl_Impl_Blake2_Constants_ivTable_S[8U] = - { - (uint32_t)0x6A09E667U, (uint32_t)0xBB67AE85U, (uint32_t)0x3C6EF372U, (uint32_t)0xA54FF53AU, - (uint32_t)0x510E527FU, (uint32_t)0x9B05688CU, (uint32_t)0x1F83D9ABU, (uint32_t)0x5BE0CD19U - }; - -static const -uint64_t -Hacl_Impl_Blake2_Constants_ivTable_B[8U] = - { - (uint64_t)0x6A09E667F3BCC908U, (uint64_t)0xBB67AE8584CAA73BU, (uint64_t)0x3C6EF372FE94F82BU, - (uint64_t)0xA54FF53A5F1D36F1U, (uint64_t)0x510E527FADE682D1U, (uint64_t)0x9B05688C2B3E6C1FU, - (uint64_t)0x1F83D9ABFB41BD6BU, (uint64_t)0x5BE0CD19137E2179U - }; - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Impl_Blake2_Constants_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Impl_FFDHE_Constants.h b/dist/c89-compatible/Hacl_Impl_FFDHE_Constants.h deleted file mode 100644
index 9f89ca01fb..0000000000 --- a/dist/c89-compatible/Hacl_Impl_FFDHE_Constants.h +++ /dev/null @@ -1,569 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Impl_FFDHE_Constants_H -#define __Hacl_Impl_FFDHE_Constants_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { (uint8_t)0x02U }; - -static const -uint8_t -Hacl_Impl_FFDHE_Constants_ffdhe_p2048[256U] = - { - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U, - (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU, - (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U, - (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU, - (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U, - (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U, - (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U, - (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU, - (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U, - (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU, - (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U, - (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U, - (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U, - (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U, - (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U, - (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U, - (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU, - (uint8_t)0xF3U, (uint8_t)0xEFU,
(uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U, - (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U, - (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU, - (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U, - (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U, - (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU, - (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U, - (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U, - (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U, - (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U, - (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU, - (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U, - (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U, - (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U, - (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U, - (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U, - (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU, - (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U, - (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U, - (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U, - (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U, - (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU, - (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x28U, - (uint8_t)0x5CU, (uint8_t)0x97U, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU - }; - -static const -uint8_t -Hacl_Impl_FFDHE_Constants_ffdhe_p3072[384U] = - { - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U, - (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU, - (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U, - (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU, - (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U, - (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U, - (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U, - (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU, - (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U, - (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU, - 
(uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U, - (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U, - (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U, - (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U, - (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U, - (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U, - (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU, - (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U, - (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U, - (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU, - (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U, - (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U, - (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU, - (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U, - (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U, - (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U, - (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U, - (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU, - (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U, - (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U, - (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U, - (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U, - (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U, - (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU, - (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U, - (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U, - (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U, - (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U, - (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU, - (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU, - (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU, - (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U, - (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U, - (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U, - (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U, - (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU, - (uint8_t)0xB0U, 
(uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU, - (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U, - (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U, - (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU, - (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U, - (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U, - (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU, - (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU, - (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU, - (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU, - (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U, - (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU, - (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU, - (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U, - (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU, - (uint8_t)0x66U, (uint8_t)0xC6U, (uint8_t)0x2EU, (uint8_t)0x37U, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU - }; - -static const -uint8_t -Hacl_Impl_FFDHE_Constants_ffdhe_p4096[512U] = - { - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U, - (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU, - (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U, - (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU, - (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U, - (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U, - (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U, - (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU, - (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U, - (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU, - (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U, - (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U, - (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U, - (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U, - (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U, - (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U, - (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU, - (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, 
(uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U, - (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U, - (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU, - (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U, - (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U, - (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU, - (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U, - (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U, - (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U, - (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U, - (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU, - (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U, - (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U, - (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U, - (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U, - (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U, - (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU, - (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U, - (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U, - (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U, - (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U, - (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU, - (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU, - (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU, - (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U, - (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U, - (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U, - (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U, - (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU, - (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU, - (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U, - (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U, - (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU, - (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U, - (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U, - (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU, - (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, 
(uint8_t)0xE8U, (uint8_t)0x6DU, - (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU, - (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU, - (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U, - (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU, - (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU, - (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U, - (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU, - (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU, - (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU, - (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U, - (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U, - (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U, - (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U, - (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U, - (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U, - (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU, - (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U, - (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U, - (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U, - (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U, - (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU, - (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U, - (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U, - (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U, - (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU, - (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U, - (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U, - (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU, - (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x65U, (uint8_t)0x5FU, (uint8_t)0x6AU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU - }; - -static const -uint8_t -Hacl_Impl_FFDHE_Constants_ffdhe_p6144[768U] = - { - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U, - (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU, - (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U, - (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, 
(uint8_t)0xCEU, (uint8_t)0x2DU, - (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U, - (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U, - (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U, - (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU, - (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U, - (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU, - (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U, - (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U, - (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U, - (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U, - (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U, - (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U, - (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU, - (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U, - (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U, - (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU, - (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U, - (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U, - (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU, - (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U, - (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U, - (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U, - (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U, - (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU, - (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U, - (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U, - (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U, - (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U, - (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U, - (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU, - (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U, - (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U, - (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U, - (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U, - (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU, - (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, 
(uint8_t)0x1FU, - (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU, - (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U, - (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U, - (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U, - (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U, - (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU, - (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU, - (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U, - (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U, - (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU, - (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U, - (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U, - (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU, - (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU, - (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU, - (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU, - (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U, - (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU, - (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU, - (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U, - (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU, - (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU, - (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU, - (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U, - (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U, - (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U, - (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U, - (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U, - (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U, - (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU, - (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U, - (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U, - (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U, - (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U, - (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU, - (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U, - 
(uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U, - (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U, - (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU, - (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U, - (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U, - (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU, - (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U, - (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U, - (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU, - (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU, - (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U, - (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U, - (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U, - (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU, - (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU, - (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU, - (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU, - (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U, - (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U, - (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U, - (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU, - (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU, - (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U, - (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U, - (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU, - (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU, - (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U, - (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U, - (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU, - (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU, - (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U, - (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U, - (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U, - (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U, - (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU, - (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U, - (uint8_t)0x7FU, 
(uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U, - (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU, - (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU, - (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU, - (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U, - (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU, - (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U, - (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU, - (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU, - (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U, - (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U, - (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U, - (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU, - (uint8_t)0xD0U, (uint8_t)0xE4U, (uint8_t)0x0EU, (uint8_t)0x65U, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU - }; - -static const -uint8_t -Hacl_Impl_FFDHE_Constants_ffdhe_p8192[1024U] = - { - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U, - (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU, - (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U, - (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU, - (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U, - (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U, - (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U, - (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU, - (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U, - (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU, - (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U, - (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U, - (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U, - (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U, - (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U, - (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U, - (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU, - (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U, - (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U, - (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, 
(uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU, - (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U, - (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U, - (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU, - (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U, - (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U, - (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U, - (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U, - (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU, - (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U, - (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U, - (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U, - (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U, - (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U, - (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU, - (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U, - (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U, - (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U, - (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U, - (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU, - (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU, - (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU, - (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U, - (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U, - (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U, - (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U, - (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU, - (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU, - (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U, - (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U, - (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU, - (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U, - (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U, - (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU, - (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU, - (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU, - (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, 
(uint8_t)0x9BU, (uint8_t)0x0DU, - (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U, - (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU, - (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU, - (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U, - (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU, - (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU, - (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU, - (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U, - (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U, - (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U, - (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U, - (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U, - (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U, - (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU, - (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U, - (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U, - (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U, - (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U, - (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU, - (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U, - (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U, - (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U, - (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU, - (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U, - (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U, - (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU, - (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U, - (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U, - (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU, - (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU, - (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U, - (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U, - (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U, - (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU, - (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU, - (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, 
(uint8_t)0xCFU, - (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU, - (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U, - (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U, - (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U, - (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU, - (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU, - (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U, - (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U, - (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU, - (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU, - (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U, - (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U, - (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU, - (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU, - (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U, - (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U, - (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U, - (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U, - (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU, - (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U, - (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U, - (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU, - (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU, - (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU, - (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U, - (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU, - (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U, - (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU, - (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU, - (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U, - (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U, - (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U, - (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU, - (uint8_t)0xCFU, (uint8_t)0xF4U, (uint8_t)0x6AU, (uint8_t)0xAAU, (uint8_t)0x36U, (uint8_t)0xADU, - (uint8_t)0x00U, (uint8_t)0x4CU, (uint8_t)0xF6U, (uint8_t)0x00U, (uint8_t)0xC8U, (uint8_t)0x38U, - (uint8_t)0x1EU, (uint8_t)0x42U, (uint8_t)0x5AU, (uint8_t)0x31U, (uint8_t)0xD9U, (uint8_t)0x51U, - 
(uint8_t)0xAEU, (uint8_t)0x64U, (uint8_t)0xFDU, (uint8_t)0xB2U, (uint8_t)0x3FU, (uint8_t)0xCEU, - (uint8_t)0xC9U, (uint8_t)0x50U, (uint8_t)0x9DU, (uint8_t)0x43U, (uint8_t)0x68U, (uint8_t)0x7FU, - (uint8_t)0xEBU, (uint8_t)0x69U, (uint8_t)0xEDU, (uint8_t)0xD1U, (uint8_t)0xCCU, (uint8_t)0x5EU, - (uint8_t)0x0BU, (uint8_t)0x8CU, (uint8_t)0xC3U, (uint8_t)0xBDU, (uint8_t)0xF6U, (uint8_t)0x4BU, - (uint8_t)0x10U, (uint8_t)0xEFU, (uint8_t)0x86U, (uint8_t)0xB6U, (uint8_t)0x31U, (uint8_t)0x42U, - (uint8_t)0xA3U, (uint8_t)0xABU, (uint8_t)0x88U, (uint8_t)0x29U, (uint8_t)0x55U, (uint8_t)0x5BU, - (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x7CU, (uint8_t)0x93U, (uint8_t)0x26U, (uint8_t)0x65U, - (uint8_t)0xCBU, (uint8_t)0x2CU, (uint8_t)0x0FU, (uint8_t)0x1CU, (uint8_t)0xC0U, (uint8_t)0x1BU, - (uint8_t)0xD7U, (uint8_t)0x02U, (uint8_t)0x29U, (uint8_t)0x38U, (uint8_t)0x88U, (uint8_t)0x39U, - (uint8_t)0xD2U, (uint8_t)0xAFU, (uint8_t)0x05U, (uint8_t)0xE4U, (uint8_t)0x54U, (uint8_t)0x50U, - (uint8_t)0x4AU, (uint8_t)0xC7U, (uint8_t)0x8BU, (uint8_t)0x75U, (uint8_t)0x82U, (uint8_t)0x82U, - (uint8_t)0x28U, (uint8_t)0x46U, (uint8_t)0xC0U, (uint8_t)0xBAU, (uint8_t)0x35U, (uint8_t)0xC3U, - (uint8_t)0x5FU, (uint8_t)0x5CU, (uint8_t)0x59U, (uint8_t)0x16U, (uint8_t)0x0CU, (uint8_t)0xC0U, - (uint8_t)0x46U, (uint8_t)0xFDU, (uint8_t)0x82U, (uint8_t)0x51U, (uint8_t)0x54U, (uint8_t)0x1FU, - (uint8_t)0xC6U, (uint8_t)0x8CU, (uint8_t)0x9CU, (uint8_t)0x86U, (uint8_t)0xB0U, (uint8_t)0x22U, - (uint8_t)0xBBU, (uint8_t)0x70U, (uint8_t)0x99U, (uint8_t)0x87U, (uint8_t)0x6AU, (uint8_t)0x46U, - (uint8_t)0x0EU, (uint8_t)0x74U, (uint8_t)0x51U, (uint8_t)0xA8U, (uint8_t)0xA9U, (uint8_t)0x31U, - (uint8_t)0x09U, (uint8_t)0x70U, (uint8_t)0x3FU, (uint8_t)0xEEU, (uint8_t)0x1CU, (uint8_t)0x21U, - (uint8_t)0x7EU, (uint8_t)0x6CU, (uint8_t)0x38U, (uint8_t)0x26U, (uint8_t)0xE5U, (uint8_t)0x2CU, - (uint8_t)0x51U, (uint8_t)0xAAU, (uint8_t)0x69U, (uint8_t)0x1EU, (uint8_t)0x0EU, (uint8_t)0x42U, - (uint8_t)0x3CU, (uint8_t)0xFCU, (uint8_t)0x99U, (uint8_t)0xE9U, (uint8_t)0xE3U, (uint8_t)0x16U, - (uint8_t)0x50U, (uint8_t)0xC1U, (uint8_t)0x21U, (uint8_t)0x7BU, (uint8_t)0x62U, (uint8_t)0x48U, - (uint8_t)0x16U, (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x9AU, (uint8_t)0x95U, (uint8_t)0xF9U, - (uint8_t)0xD5U, (uint8_t)0xB8U, (uint8_t)0x01U, (uint8_t)0x94U, (uint8_t)0x88U, (uint8_t)0xD9U, - (uint8_t)0xC0U, (uint8_t)0xA0U, (uint8_t)0xA1U, (uint8_t)0xFEU, (uint8_t)0x30U, (uint8_t)0x75U, - (uint8_t)0xA5U, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0x31U, (uint8_t)0x83U, (uint8_t)0xF8U, - (uint8_t)0x1DU, (uint8_t)0x4AU, (uint8_t)0x3FU, (uint8_t)0x2FU, (uint8_t)0xA4U, (uint8_t)0x57U, - (uint8_t)0x1EU, (uint8_t)0xFCU, (uint8_t)0x8CU, (uint8_t)0xE0U, (uint8_t)0xBAU, (uint8_t)0x8AU, - (uint8_t)0x4FU, (uint8_t)0xE8U, (uint8_t)0xB6U, (uint8_t)0x85U, (uint8_t)0x5DU, (uint8_t)0xFEU, - (uint8_t)0x72U, (uint8_t)0xB0U, (uint8_t)0xA6U, (uint8_t)0x6EU, (uint8_t)0xDEU, (uint8_t)0xD2U, - (uint8_t)0xFBU, (uint8_t)0xABU, (uint8_t)0xFBU, (uint8_t)0xE5U, (uint8_t)0x8AU, (uint8_t)0x30U, - (uint8_t)0xFAU, (uint8_t)0xFAU, (uint8_t)0xBEU, (uint8_t)0x1CU, (uint8_t)0x5DU, (uint8_t)0x71U, - (uint8_t)0xA8U, (uint8_t)0x7EU, (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x1EU, (uint8_t)0xF8U, - (uint8_t)0xC1U, (uint8_t)0xFEU, (uint8_t)0x86U, (uint8_t)0xFEU, (uint8_t)0xA6U, (uint8_t)0xBBU, - (uint8_t)0xFDU, (uint8_t)0xE5U, (uint8_t)0x30U, (uint8_t)0x67U, (uint8_t)0x7FU, (uint8_t)0x0DU, - (uint8_t)0x97U, (uint8_t)0xD1U, (uint8_t)0x1DU, (uint8_t)0x49U, (uint8_t)0xF7U, (uint8_t)0xA8U, - (uint8_t)0x44U, 
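All of the Hacl_Impl_FFDHE_Constants_ffdhe_p* tables in this hunk (ffdhe_p6144 above, and ffdhe_p8192, whose tail follows) are the big-endian byte encodings of the RFC 7919 finite-field Diffie-Hellman primes. By construction every such prime has its top and bottom 64 bits all set, which is why each array both begins and ends with eight 0xFF bytes. A minimal sanity check on that framing; ffdhe_framing_ok is a hypothetical helper, not part of the deleted code:

#include <stddef.h>
#include <stdint.h>

/* Checks the RFC 7919 framing of a big-endian ffdhe prime encoding:
 * the first and the last eight bytes must all be 0xFF. */
static int ffdhe_framing_ok(const uint8_t *p, size_t len)
{
  size_t i;
  for (i = 0; i < 8; i++)
    if (p[i] != 0xFFU || p[len - 1U - i] != 0xFFU)
      return 0;
  return 1;
}

For ffdhe_p8192 the length is 1024 bytes, i.e. the full 8192-bit modulus.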
(uint8_t)0x3DU, (uint8_t)0x08U, (uint8_t)0x22U, (uint8_t)0xE5U, (uint8_t)0x06U, - (uint8_t)0xA9U, (uint8_t)0xF4U, (uint8_t)0x61U, (uint8_t)0x4EU, (uint8_t)0x01U, (uint8_t)0x1EU, - (uint8_t)0x2AU, (uint8_t)0x94U, (uint8_t)0x83U, (uint8_t)0x8FU, (uint8_t)0xF8U, (uint8_t)0x8CU, - (uint8_t)0xD6U, (uint8_t)0x8CU, (uint8_t)0x8BU, (uint8_t)0xB7U, (uint8_t)0xC5U, (uint8_t)0xC6U, - (uint8_t)0x42U, (uint8_t)0x4CU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, - (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU - }; - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Impl_FFDHE_Constants_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_IntTypes_Intrinsics.h b/dist/c89-compatible/Hacl_IntTypes_Intrinsics.h deleted file mode 100644 index a03650bcbb..0000000000 --- a/dist/c89-compatible/Hacl_IntTypes_Intrinsics.h +++ /dev/null @@ -1,86 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-
-#ifndef __Hacl_IntTypes_Intrinsics_H
-#define __Hacl_IntTypes_Intrinsics_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-
-#include "evercrypt_targetconfig.h"
-static inline uint32_t
-Hacl_IntTypes_Intrinsics_add_carry_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
-{
-  uint64_t res = (uint64_t)x + (uint64_t)cin + (uint64_t)y;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U);
-  r[0U] = (uint32_t)res;
-  return c;
-}
-
-static inline uint32_t
-Hacl_IntTypes_Intrinsics_sub_borrow_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
-{
-  uint64_t res = (uint64_t)x - (uint64_t)y - (uint64_t)cin;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U) & (uint32_t)1U;
-  r[0U] = (uint32_t)res;
-  return c;
-}
-
-static inline uint64_t
-Hacl_IntTypes_Intrinsics_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
-{
-  uint64_t res = x + cin + y;
-  uint64_t
-  c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & (uint64_t)1U;
-  r[0U] = res;
-  return c;
-}
-
-static inline uint64_t
-Hacl_IntTypes_Intrinsics_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
-{
-  uint64_t res = x - y - cin;
-  uint64_t
-  c =
-    ((FStar_UInt64_gte_mask(res, x) & ~FStar_UInt64_eq_mask(res, x))
-    | (FStar_UInt64_eq_mask(res, x) & cin))
-    & (uint64_t)1U;
-  r[0U] = res;
-  return c;
-}
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_IntTypes_Intrinsics_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_IntTypes_Intrinsics_128.h b/dist/c89-compatible/Hacl_IntTypes_Intrinsics_128.h
deleted file mode 100644
index b187029ba7..0000000000
--- a/dist/c89-compatible/Hacl_IntTypes_Intrinsics_128.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
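The deleted u32 intrinsics above get their carry and borrow for free by widening to uint64_t; the u64 variants cannot widen in C89, so add_carry_u64 reconstructs the carry branch-free from comparison masks: for res = x + cin + y, a carry happened exactly when res < x, or when res == x while cin was set (only possible if y was all ones). A sketch of that identity; eq_mask and gte_mask below are my own stand-ins for the krmllib FStar_UInt64_* mask functions (all-ones when the predicate holds, zero otherwise), not the library code:

#include <assert.h>
#include <stdint.h>

static uint64_t eq_mask(uint64_t a, uint64_t b)   /* all-ones iff a == b */
{
  uint64_t x = a ^ b;
  return ((x | ((uint64_t)0U - x)) >> 63) - 1U;
}

static uint64_t gte_mask(uint64_t a, uint64_t b)  /* all-ones iff a >= b */
{
  uint64_t borrow = ((~a & b) | (~(a ^ b) & (a - b))) >> 63;
  return borrow - 1U;
}

/* Same carry identity as Hacl_IntTypes_Intrinsics_add_carry_u64:
 * carry iff res < x, or res == x while cin was set. */
static uint64_t add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
{
  uint64_t res = x + cin + y;
  uint64_t c = (~gte_mask(res, x) | (eq_mask(res, x) & cin)) & (uint64_t)1U;
  *r = res;
  return c;
}

int main(void)
{
  uint64_t r;
  assert(add_carry_u64(0U, 1U, 2U, &r) == 0U && r == 3U);          /* no carry */
  assert(add_carry_u64(1U, UINT64_MAX, 0U, &r) == 1U && r == 0U);  /* res < x  */
  assert(add_carry_u64(1U, 5U, UINT64_MAX, &r) == 1U && r == 5U);  /* res == x */
  return 0;
}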
- */
-
-
-#ifndef __Hacl_IntTypes_Intrinsics_128_H
-#define __Hacl_IntTypes_Intrinsics_128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "evercrypt_targetconfig.h"
-static inline uint64_t
-Hacl_IntTypes_Intrinsics_128_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
-{
-  FStar_UInt128_uint128
-  res =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_uint64_to_uint128(x),
-        FStar_UInt128_uint64_to_uint128(cin)),
-      FStar_UInt128_uint64_to_uint128(y));
-  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
-  r[0U] = FStar_UInt128_uint128_to_uint64(res);
-  return c;
-}
-
-static inline uint64_t
-Hacl_IntTypes_Intrinsics_128_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
-{
-  FStar_UInt128_uint128
-  res =
-    FStar_UInt128_sub_mod(FStar_UInt128_sub_mod(FStar_UInt128_uint64_to_uint128(x),
-        FStar_UInt128_uint64_to_uint128(y)),
-      FStar_UInt128_uint64_to_uint128(cin));
-  uint64_t
-  c =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U))
-    & (uint64_t)1U;
-  r[0U] = FStar_UInt128_uint128_to_uint64(res);
-  return c;
-}
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_IntTypes_Intrinsics_128_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_K256_ECDSA.c b/dist/c89-compatible/Hacl_K256_ECDSA.c
deleted file mode 100644
index c676e2adcd..0000000000
--- a/dist/c89-compatible/Hacl_K256_ECDSA.c
+++ /dev/null
@@ -1,1867 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
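The deleted _128 header above spells out the same two operations through KaRaMeL's FStar_UInt128 struct wrappers so that they stay legal C89. On a compiler with a native unsigned __int128 the same functions reduce to a few lines; a sketch of the equivalence (GCC/Clang extension, not the generated code):

#include <stdint.h>

/* What Hacl_IntTypes_Intrinsics_128_add_carry_u64 computes, given a
 * native 128-bit type. */
static uint64_t add_carry_u64_128(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
{
  unsigned __int128 res = (unsigned __int128)x + cin + y;
  *r = (uint64_t)res;
  return (uint64_t)(res >> 64);
}

/* Likewise for sub_borrow: the high half of the wrapped difference
 * carries the borrow bit. */
static uint64_t sub_borrow_u64_128(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
{
  unsigned __int128 res = (unsigned __int128)x - y - cin;
  *r = (uint64_t)res;
  return (uint64_t)(res >> 64) & (uint64_t)1U;
}

The C89 distribution routed all 128-bit arithmetic through the struct-based FStar_UInt128 functions instead, which is one reason this code path was heavier than the other dists.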
- */ - - -#include "internal/Hacl_K256_ECDSA.h" - - - -static inline uint64_t -bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res) -{ - uint64_t *a0 = a; - uint64_t *res0 = res; - uint64_t c1 = (uint64_t)0U; - uint64_t c0; - uint64_t ite; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen / (uint32_t)4U; i++) - { - uint64_t t1 = a0[(uint32_t)4U * i]; - uint64_t t20 = b[(uint32_t)4U * i]; - uint64_t *res_i0 = res0 + (uint32_t)4U * i; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, t1, t20, res_i0); - { - uint64_t t10 = a0[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res0 + (uint32_t)4U * i + (uint32_t)1U; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, t10, t21, res_i1); - { - uint64_t t11 = a0[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res0 + (uint32_t)4U * i + (uint32_t)2U; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, t11, t22, res_i2); - { - uint64_t t12 = a0[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res0 + (uint32_t)4U * i + (uint32_t)3U; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, t12, t2, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = bLen / (uint32_t)4U * (uint32_t)4U; i < bLen; i++) - { - uint64_t t1 = a0[i]; - uint64_t t2 = b[i]; - uint64_t *res_i = res0 + i; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, t1, t2, res_i); - } - } - c0 = c1; - if (bLen < aLen) - { - uint64_t *a1 = a + bLen; - uint64_t *res1 = res + bLen; - uint64_t c = c0; - { - uint32_t i; - for (i = (uint32_t)0U; i < (aLen - bLen) / (uint32_t)4U; i++) - { - uint64_t t1 = a1[(uint32_t)4U * i]; - uint64_t *res_i0 = res1 + (uint32_t)4U * i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0); - { - uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1); - { - uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2); - { - uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = (aLen - bLen) / (uint32_t)4U * (uint32_t)4U; i < aLen - bLen; i++) - { - uint64_t t1 = a1[i]; - uint64_t *res_i = res1 + i; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i); - } - } - { - uint64_t c10 = c; - ite = c10; - } - } - else - { - ite = c0; - } - return ite; -} - -static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c = (uint64_t)0U; - { - uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + 
(uint32_t)2U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i); - } - } - } - } - return c; -} - -static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - { - uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, t12, t2, res_i); - } - } - } - } - c0 = c2; - { - uint64_t tmp[4U] = { 0U }; - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - { - uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c3, t12, t2, res_i); - } - } - } - } - c1 = c3; - c = c0 - c1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = res; - uint64_t x = (c & res[i]) | (~c & tmp[i]); - os[i] = x;); - } -} - -static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint64_t c2 = (uint64_t)0U; - uint64_t c0; - { - uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t1, t20, res_i0); - { - uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t10, t21, res_i1); - { - uint64_t t11 = 
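add4 and add_mod4 above follow the same pattern as bn_add: a carry threaded through Lib_IntTypes_Intrinsics_add_carry_u64 across four limbs, then, for the modular variant, a trial subtraction of n and a branchless select. The mask c = c0 - c1 is all ones exactly when the sum did not need reduction (no carry out of the add, and the trial subtraction borrowed) and zero when the reduced value must be kept. A generic sketch of both pieces, using GCC/Clang's __builtin_add_overflow as a stand-in for the add-carry intrinsic:

#include <stdint.h>

/* Carry chain of bn_add/add4, without the 4-way unrolling. */
static uint64_t bn_add_sketch(uint32_t len, const uint64_t *a,
                              const uint64_t *b, uint64_t *res)
{
  uint64_t c = 0U;
  uint32_t i;
  for (i = 0U; i < len; i++)
  {
    uint64_t t;
    int c1 = __builtin_add_overflow(a[i], b[i], &t);
    int c2 = __builtin_add_overflow(t, c, &res[i]);
    c = (uint64_t)(c1 | c2);  /* at most one of the two adds can wrap */
  }
  return c;
}

/* Branchless limb select used after the trial subtraction:
 * mask all-ones keeps `keep`, mask zero takes `other`. */
static void ct_select4(uint64_t mask, uint64_t *out,
                       const uint64_t *keep, const uint64_t *other)
{
  int i;
  for (i = 0; i < 4; i++)
    out[i] = (mask & keep[i]) | (~mask & other[i]);
}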
a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t11, t22, res_i2); - { - uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c2, t12, t2, res_i); - } - } - } - } - c0 = c2; - { - uint64_t tmp[4U] = { 0U }; - uint64_t c3 = (uint64_t)0U; - uint64_t c1; - uint64_t c; - { - uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U]; - uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t1, t20, res_i0); - { - uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t10, t21, res_i1); - { - uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t11, t22, res_i2); - { - uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c3, t12, t2, res_i); - } - } - } - } - c1 = c3; - c = (uint64_t)0U - c0; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = res; - uint64_t x = (c & tmp[i]) | (~c & res[i]); - os[i] = x;); - } -} - -static void mul4(uint64_t *a, uint64_t *b, uint64_t *res) -{ - uint32_t i; - memset(res, 0U, (uint32_t)8U * sizeof (uint64_t)); - for (i = (uint32_t)0U; i < (uint32_t)4U; i++) - { - uint64_t bj = b[i]; - uint64_t *res_j = res + i; - uint64_t c = (uint64_t)0U; - uint64_t r; - { - uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); - { - uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); - { - uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); - { - uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); - } - } - } - } - r = c; - res[(uint32_t)4U + i] = r; - } -} - -static void sqr4(uint64_t *a, uint64_t *res) -{ - uint64_t c0; - memset(res, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ab = a; - uint64_t a_j = a[i0]; - uint64_t *res_j = res + i0; - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint64_t a_i = ab[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0); - { - uint64_t a_i0 = 
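mul4 above is plain 4x4 schoolbook multiplication: for each limb bj of b it sweeps a, accumulating into res[i+j] through Hacl_Bignum_Base_mul_wide_add2_u64, and stores the final carry at res[4+j]. A sketch of that primitive with a native unsigned __int128 (the C89 build calls a portable equivalent instead):

#include <stdint.h>

/* *out += a * b + cin, returning the high 64 bits. The sum cannot
 * overflow 128 bits: a*b + cin + *out <= (2^64-1)^2 + 2*(2^64-1)
 * = 2^128 - 1. */
static uint64_t mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t cin, uint64_t *out)
{
  unsigned __int128 t = (unsigned __int128)a * b + cin + *out;
  *out = (uint64_t)t;
  return (uint64_t)(t >> 64);
}

sqr4 reuses the same primitive but only walks the below-diagonal cross products, doubles them with one bignum self-add, and then adds in the squares a[i]^2, roughly halving the multiplications.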
ab[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1); - { - uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2); - { - uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint64_t a_i = ab[i]; - uint64_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i); - } - } - uint64_t r = c; - res[i0 + i0] = r;); - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res); - { - uint64_t tmp[8U] = { 0U }; - uint64_t c1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]); - uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U)); - uint64_t lo = FStar_UInt128_uint128_to_uint64(res1); - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;); - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res); - } -} - -static inline uint64_t is_qelem_zero(uint64_t *f) -{ - uint64_t bn_zero[4U] = { 0U }; - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - uint64_t res; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]); - mask = uu____0 & mask;); - mask1 = mask; - res = mask1; - return res; -} - -static inline bool is_qelem_zero_vartime(uint64_t *f) -{ - uint64_t f0 = f[0U]; - uint64_t f1 = f[1U]; - uint64_t f2 = f[2U]; - uint64_t f3 = f[3U]; - return f0 == (uint64_t)0U && f1 == (uint64_t)0U && f2 == (uint64_t)0U && f3 == (uint64_t)0U; -} - -static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b) -{ - uint64_t n[4U] = { 0U }; - n[0U] = (uint64_t)0xbfd25e8cd0364141U; - n[1U] = (uint64_t)0xbaaedce6af48a03bU; - n[2U] = (uint64_t)0xfffffffffffffffeU; - n[3U] = (uint64_t)0xffffffffffffffffU; - { - uint64_t is_zero; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = f; - uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U); - uint64_t x = u; - os[i] = x;); - is_zero = is_qelem_zero(f); - { - uint64_t acc = (uint64_t)0U; - uint64_t is_lt_q; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t beq = FStar_UInt64_eq_mask(f[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(f[i], n[i]); - acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));); - is_lt_q = acc; - return ~is_zero & is_lt_q; - } - } -} - -static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b) -{ - bool is_zero; - uint64_t a0; - uint64_t a1; - uint64_t a2; - uint64_t a3; - bool is_lt_q_b; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = f; - uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U); - uint64_t x = u; - os[i] = x;); - is_zero = is_qelem_zero_vartime(f); - a0 = f[0U]; - a1 = f[1U]; - a2 = f[2U]; - a3 = f[3U]; - if (a3 < (uint64_t)0xffffffffffffffffU) - { - is_lt_q_b = true; - } - else if (a2 < (uint64_t)0xfffffffffffffffeU) - { - is_lt_q_b = true; - } - else if (a2 > (uint64_t)0xfffffffffffffffeU) - { - 
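is_qelem_zero and load_qelem_check above validate a scalar without branching on secret data: the zero test folds limbwise equality masks, and the "f < n" test scans the limbs from least to most significant, letting each more significant limb overwrite the verdict unless the limbs are equal. The same accumulator in isolation, with the mask helpers from the earlier add-carry sketch repeated so the snippet stands alone:

#include <stdint.h>

static uint64_t eq_mask(uint64_t a, uint64_t b)   /* all-ones iff a == b */
{
  uint64_t x = a ^ b;
  return ((x | ((uint64_t)0U - x)) >> 63) - 1U;
}

static uint64_t gte_mask(uint64_t a, uint64_t b)  /* all-ones iff a >= b */
{
  uint64_t borrow = ((~a & b) | (~(a ^ b) & (a - b))) >> 63;
  return borrow - 1U;
}

/* Constant-time "f < n" over 4 little-endian limbs, mirroring the
 * accumulator in load_qelem_check: a more significant limb overrides
 * the verdict of the lower ones unless the limbs are equal. */
static uint64_t lt_mask4(const uint64_t *f, const uint64_t *n)
{
  uint64_t acc = 0U;
  int i;
  for (i = 0; i < 4; i++)
  {
    uint64_t beq = eq_mask(f[i], n[i]);
    uint64_t blt = ~gte_mask(f[i], n[i]);
    acc = (beq & acc) | (~beq & blt);
  }
  return acc;
}

load_qelem_vartime, by contrast, is free to branch limb by limb because it only ever runs on public inputs.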
is_lt_q_b = false; - } - else if (a1 < (uint64_t)0xbaaedce6af48a03bU) - { - is_lt_q_b = true; - } - else if (a1 > (uint64_t)0xbaaedce6af48a03bU) - { - is_lt_q_b = false; - } - else - { - is_lt_q_b = a0 < (uint64_t)0xbfd25e8cd0364141U; - } - return !is_zero && is_lt_q_b; -} - -static inline void modq_short(uint64_t *out, uint64_t *a) -{ - uint64_t tmp[4U] = { 0U }; - uint64_t c; - uint64_t mask; - tmp[0U] = (uint64_t)0x402da1732fc9bebfU; - tmp[1U] = (uint64_t)0x4551231950b75fc4U; - tmp[2U] = (uint64_t)0x1U; - tmp[3U] = (uint64_t)0x0U; - c = add4(a, tmp, out); - mask = (uint64_t)0U - c; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = out; - uint64_t x = (mask & out[i]) | (~mask & a[i]); - os[i] = x;); -} - -static inline void load_qelem_modq(uint64_t *f, uint8_t *b) -{ - uint64_t tmp[4U] = { 0U }; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = f; - uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U); - uint64_t x = u; - os[i] = x;); - memcpy(tmp, f, (uint32_t)4U * sizeof (uint64_t)); - modq_short(f, tmp); -} - -static inline void store_qelem(uint8_t *b, uint64_t *f) -{ - uint8_t tmp[32U] = { 0U }; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - store64_be(b + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]);); -} - -static inline void qadd(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t n[4U] = { 0U }; - n[0U] = (uint64_t)0xbfd25e8cd0364141U; - n[1U] = (uint64_t)0xbaaedce6af48a03bU; - n[2U] = (uint64_t)0xfffffffffffffffeU; - n[3U] = (uint64_t)0xffffffffffffffffU; - add_mod4(n, f1, f2, out); -} - -static inline uint64_t -mul_pow2_256_minus_q_add( - uint32_t len, - uint32_t resLen, - uint64_t *t01, - uint64_t *a, - uint64_t *e, - uint64_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), len + (uint32_t)2U); - { - uint64_t tmp[len + (uint32_t)2U]; - memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t)); - { - uint64_t uu____0; - uint64_t c0; - memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t)); - KRML_MAYBE_FOR2(i0, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint64_t bj = t01[i0]; - uint64_t *res_j = tmp + i0; - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < len / (uint32_t)4U; i++) - { - uint64_t a_i = a[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); - { - uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); - { - uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); - { - uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++) - { - uint64_t a_i = a[i]; - uint64_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i); - } - } - uint64_t r = c; - tmp[len + i0] = r;); - memcpy(res + (uint32_t)2U, a, len * sizeof (uint64_t)); - uu____0 = bn_add(resLen, res, len + (uint32_t)2U, tmp, res); - c0 = bn_add(resLen, res, (uint32_t)4U, e, res); - return c0; - } - } -} - -static inline void modq(uint64_t *out, uint64_t *a) -{ - uint64_t r[4U] = { 0U }; - uint64_t 
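All of the reduction code above exploits that the secp256k1 group order is q = 2^256 - c with c = 0x14551231950b75fc4402da1732fc9bebf, which is exactly what the four tmp limbs spell in little-endian order. Splitting a as a_hi * 2^256 + a_lo gives a = a_hi * c + a_lo (mod q); mul_pow2_256_minus_q_add performs one such folding step, modq chains them, and modq_short handles the final conditional subtraction by adding c and keeping the sum only when it carried (valid because its input is below 2q). A quick check of the defining relation q + c = 2^256, with q's limbs copied from the n[] initializations above (a sketch, not part of the deleted code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
  static const uint64_t q[4] =
    { 0xbfd25e8cd0364141U, 0xbaaedce6af48a03bU,
      0xfffffffffffffffeU, 0xffffffffffffffffU };
  static const uint64_t c[4] =
    { 0x402da1732fc9bebfU, 0x4551231950b75fc4U, 0x1U, 0x0U };
  uint64_t carry = 0U;
  int i;
  for (i = 0; i < 4; i++)
  {
    uint64_t t = q[i] + c[i];
    uint64_t s = t + carry;
    carry = (uint64_t)(t < q[i]) + (uint64_t)(s < t);  /* carry-out of the limb */
    assert(s == 0U);   /* every limb of q + c wraps to zero ... */
  }
  assert(carry == 1U); /* ... with a final carry out: q + c == 2^256 */
  return 0;
}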
tmp[4U] = { 0U }; - uint64_t *t01; - tmp[0U] = (uint64_t)0x402da1732fc9bebfU; - tmp[1U] = (uint64_t)0x4551231950b75fc4U; - tmp[2U] = (uint64_t)0x1U; - tmp[3U] = (uint64_t)0x0U; - t01 = tmp; - { - uint64_t m[7U] = { 0U }; - uint64_t p[5U] = { 0U }; - uint64_t - c0 = mul_pow2_256_minus_q_add((uint32_t)4U, (uint32_t)7U, t01, a + (uint32_t)4U, a, m); - uint64_t - c10 = mul_pow2_256_minus_q_add((uint32_t)3U, (uint32_t)5U, t01, m + (uint32_t)4U, m, p); - uint64_t - c2 = mul_pow2_256_minus_q_add((uint32_t)1U, (uint32_t)4U, t01, p + (uint32_t)4U, p, r); - uint64_t c00 = c2; - uint64_t c1 = add4(r, tmp, out); - uint64_t mask = (uint64_t)0U - (c00 + c1); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = out; - uint64_t x = (mask & out[i]) | (~mask & r[i]); - os[i] = x;); - } -} - -static inline void qmul(uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - uint64_t tmp[8U] = { 0U }; - mul4(f1, f2, tmp); - modq(out, tmp); -} - -static inline void qsqr(uint64_t *out, uint64_t *f) -{ - uint64_t tmp[8U] = { 0U }; - sqr4(f, tmp); - modq(out, tmp); -} - -static inline void qnegate_conditional_vartime(uint64_t *f, bool is_negate) -{ - uint64_t n[4U] = { 0U }; - n[0U] = (uint64_t)0xbfd25e8cd0364141U; - n[1U] = (uint64_t)0xbaaedce6af48a03bU; - n[2U] = (uint64_t)0xfffffffffffffffeU; - n[3U] = (uint64_t)0xffffffffffffffffU; - { - uint64_t zero[4U] = { 0U }; - if (is_negate) - { - sub_mod4(n, zero, f, f); - } - } -} - -static inline bool is_qelem_le_q_halved_vartime(uint64_t *f) -{ - uint64_t a0 = f[0U]; - uint64_t a1 = f[1U]; - uint64_t a2 = f[2U]; - uint64_t a3 = f[3U]; - if (a3 < (uint64_t)0x7fffffffffffffffU) - { - return true; - } - if (a3 > (uint64_t)0x7fffffffffffffffU) - { - return false; - } - if (a2 < (uint64_t)0xffffffffffffffffU) - { - return true; - } - if (a2 > (uint64_t)0xffffffffffffffffU) - { - return false; - } - if (a1 < (uint64_t)0x5d576e7357a4501dU) - { - return true; - } - if (a1 > (uint64_t)0x5d576e7357a4501dU) - { - return false; - } - return a0 <= (uint64_t)0xdfe92f46681b20a0U; -} - -static inline void qsquare_times_in_place(uint64_t *out, uint32_t b) -{ - uint32_t i; - for (i = (uint32_t)0U; i < b; i++) - { - qsqr(out, out); - } -} - -static inline void qsquare_times(uint64_t *out, uint64_t *a, uint32_t b) -{ - uint32_t i; - memcpy(out, a, (uint32_t)4U * sizeof (uint64_t)); - for (i = (uint32_t)0U; i < b; i++) - { - qsqr(out, out); - } -} - -static inline void qinv(uint64_t *out, uint64_t *f) -{ - uint64_t x_10[4U] = { 0U }; - uint64_t x_11[4U] = { 0U }; - uint64_t x_101[4U] = { 0U }; - uint64_t x_111[4U] = { 0U }; - uint64_t x_1001[4U] = { 0U }; - uint64_t x_1011[4U] = { 0U }; - uint64_t x_1101[4U] = { 0U }; - qsquare_times(x_10, f, (uint32_t)1U); - qmul(x_11, x_10, f); - qmul(x_101, x_10, x_11); - qmul(x_111, x_10, x_101); - qmul(x_1001, x_10, x_111); - qmul(x_1011, x_10, x_1001); - qmul(x_1101, x_10, x_1011); - { - uint64_t x6[4U] = { 0U }; - uint64_t x8[4U] = { 0U }; - uint64_t x14[4U] = { 0U }; - qsquare_times(x6, x_1101, (uint32_t)2U); - qmul(x6, x6, x_1011); - qsquare_times(x8, x6, (uint32_t)2U); - qmul(x8, x8, x_11); - qsquare_times(x14, x8, (uint32_t)6U); - qmul(x14, x14, x6); - { - uint64_t x56[4U] = { 0U }; - qsquare_times(out, x14, (uint32_t)14U); - qmul(out, out, x14); - qsquare_times(x56, out, (uint32_t)28U); - qmul(x56, x56, out); - qsquare_times(out, x56, (uint32_t)56U); - qmul(out, out, x56); - qsquare_times_in_place(out, (uint32_t)14U); - qmul(out, out, x14); - qsquare_times_in_place(out, (uint32_t)3U); - qmul(out, out, x_101); - 
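qinv above computes f^(q-2) mod q, i.e. inversion via Fermat's little theorem, which runs in constant time unlike an extended-Euclid routine. The x_10 ... x_1101 temporaries are precomputed small powers (the suffix is the exponent in binary, so x_1011 = f^11), and the long qsquare_times/qmul tail is a fixed addition chain for the 256-bit exponent. The same exponentiation idea in its naive square-and-multiply form, over a small prime so it is easy to check (a sketch, not the deleted chain):

#include <stdint.h>

/* Fermat inversion by plain square-and-multiply; qinv walks a
 * hand-tuned addition chain for the fixed exponent q - 2 instead
 * of scanning bits one by one. */
static uint64_t pow_mod(uint64_t base, uint64_t exp, uint64_t m)
{
  uint64_t acc = 1U % m;
  base %= m;
  while (exp > 0U)
  {
    if (exp & 1U)
      acc = (acc * base) % m;   /* no overflow as long as m < 2^32 */
    base = (base * base) % m;
    exp >>= 1U;
  }
  return acc;
}
/* e.g. pow_mod(3U, 97U - 2U, 97U) == 65U, and (3U * 65U) % 97U == 1U. */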
qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_111); - qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_101); - qsquare_times_in_place(out, (uint32_t)5U); - qmul(out, out, x_1011); - qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_1011); - qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_111); - qsquare_times_in_place(out, (uint32_t)5U); - qmul(out, out, x_111); - qsquare_times_in_place(out, (uint32_t)6U); - qmul(out, out, x_1101); - qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_101); - qsquare_times_in_place(out, (uint32_t)3U); - qmul(out, out, x_111); - qsquare_times_in_place(out, (uint32_t)5U); - qmul(out, out, x_1001); - qsquare_times_in_place(out, (uint32_t)6U); - qmul(out, out, x_101); - qsquare_times_in_place(out, (uint32_t)10U); - qmul(out, out, x_111); - qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_111); - qsquare_times_in_place(out, (uint32_t)9U); - qmul(out, out, x8); - qsquare_times_in_place(out, (uint32_t)5U); - qmul(out, out, x_1001); - qsquare_times_in_place(out, (uint32_t)6U); - qmul(out, out, x_1011); - qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_1101); - qsquare_times_in_place(out, (uint32_t)5U); - qmul(out, out, x_11); - qsquare_times_in_place(out, (uint32_t)6U); - qmul(out, out, x_1101); - qsquare_times_in_place(out, (uint32_t)10U); - qmul(out, out, x_1101); - qsquare_times_in_place(out, (uint32_t)4U); - qmul(out, out, x_1001); - qsquare_times_in_place(out, (uint32_t)6U); - qmul(out, out, f); - qsquare_times_in_place(out, (uint32_t)8U); - qmul(out, out, x6); - } - } -} - -bool Hacl_Impl_K256_Point_aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_t *s) -{ - uint8_t s0 = s[0U]; - if (!(s0 == (uint8_t)0x02U || s0 == (uint8_t)0x03U)) - { - return false; - } - { - uint8_t *xb = s + (uint32_t)1U; - bool is_x_valid = Hacl_K256_Field_load_felem_vartime(x, xb); - bool is_y_odd = s0 == (uint8_t)0x03U; - if (!is_x_valid) - { - return false; - } - { - uint64_t y2[5U] = { 0U }; - uint64_t b[5U] = { 0U }; - b[0U] = (uint64_t)0x7U; - b[1U] = (uint64_t)0U; - b[2U] = (uint64_t)0U; - b[3U] = (uint64_t)0U; - b[4U] = (uint64_t)0U; - Hacl_K256_Field_fsqr(y2, x); - Hacl_K256_Field_fmul(y2, y2, x); - Hacl_K256_Field_fadd(y2, y2, b); - Hacl_K256_Field_fnormalize(y2, y2); - Hacl_Impl_K256_Finv_fsqrt(y, y2); - Hacl_K256_Field_fnormalize(y, y); - { - uint64_t y2_comp[5U] = { 0U }; - Hacl_K256_Field_fsqr(y2_comp, y); - Hacl_K256_Field_fnormalize(y2_comp, y2_comp); - { - bool res = Hacl_K256_Field_is_felem_eq_vartime(y2, y2_comp); - bool is_y_valid = res; - if (!is_y_valid) - { - return false; - } - { - uint64_t x0 = y[0U]; - bool is_y_odd1 = (x0 & (uint64_t)1U) == (uint64_t)1U; - Hacl_K256_Field_fnegate_conditional_vartime(y, is_y_odd1 != is_y_odd); - return true; - } - } - } - } - } -} - -void Hacl_Impl_K256_Point_aff_point_compress_vartime(uint8_t *s, uint64_t *x, uint64_t *y) -{ - uint64_t x0; - bool is_y_odd; - uint8_t ite; - Hacl_K256_Field_fnormalize(y, y); - Hacl_K256_Field_fnormalize(x, x); - x0 = y[0U]; - is_y_odd = (x0 & (uint64_t)1U) == (uint64_t)1U; - if (is_y_odd) - { - ite = (uint8_t)0x03U; - } - else - { - ite = (uint8_t)0x02U; - } - s[0U] = ite; - Hacl_K256_Field_store_felem(s + (uint32_t)1U, x); -} - -void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p) -{ - uint64_t *px = p; - uint64_t *py = p + (uint32_t)5U; - uint64_t *pz = p + (uint32_t)10U; - uint64_t *ox = out; - uint64_t *oy = out + (uint32_t)5U; - uint64_t *oz = out + (uint32_t)10U; - uint64_t 
a0; - uint64_t a1; - uint64_t a2; - uint64_t a3; - uint64_t a4; - uint64_t r0; - uint64_t r1; - uint64_t r2; - uint64_t r3; - uint64_t r4; - uint64_t f0; - uint64_t f1; - uint64_t f2; - uint64_t f3; - uint64_t f4; - ox[0U] = px[0U]; - ox[1U] = px[1U]; - ox[2U] = px[2U]; - ox[3U] = px[3U]; - ox[4U] = px[4U]; - oz[0U] = pz[0U]; - oz[1U] = pz[1U]; - oz[2U] = pz[2U]; - oz[3U] = pz[3U]; - oz[4U] = pz[4U]; - a0 = py[0U]; - a1 = py[1U]; - a2 = py[2U]; - a3 = py[3U]; - a4 = py[4U]; - r0 = (uint64_t)18014381329608892U - a0; - r1 = (uint64_t)18014398509481980U - a1; - r2 = (uint64_t)18014398509481980U - a2; - r3 = (uint64_t)18014398509481980U - a3; - r4 = (uint64_t)1125899906842620U - a4; - f0 = r0; - f1 = r1; - f2 = r2; - f3 = r3; - f4 = r4; - oy[0U] = f0; - oy[1U] = f1; - oy[2U] = f2; - oy[3U] = f3; - oy[4U] = f4; - Hacl_K256_Field_fnormalize_weak(oy, oy); -} - -static inline bool fmul_fmul_eq_vartime(uint64_t *a, uint64_t *bz, uint64_t *c, uint64_t *dz) -{ - uint64_t a_bz[5U] = { 0U }; - uint64_t c_dz[5U] = { 0U }; - bool z; - Hacl_K256_Field_fmul(a_bz, a, bz); - Hacl_K256_Field_fmul(c_dz, c, dz); - Hacl_K256_Field_fnormalize(a_bz, a_bz); - Hacl_K256_Field_fnormalize(c_dz, c_dz); - z = Hacl_K256_Field_is_felem_eq_vartime(a_bz, c_dz); - return z; -} - -bool Hacl_Impl_K256_Point_point_eq(uint64_t *p, uint64_t *q) -{ - uint64_t *px = p; - uint64_t *py = p + (uint32_t)5U; - uint64_t *pz = p + (uint32_t)10U; - uint64_t *qx = q; - uint64_t *qy = q + (uint32_t)5U; - uint64_t *qz = q + (uint32_t)10U; - bool z0 = fmul_fmul_eq_vartime(px, qz, qx, pz); - if (!z0) - { - return false; - } - return fmul_fmul_eq_vartime(py, qz, qy, pz); -} - -void Hacl_Impl_K256_PointDouble_point_double(uint64_t *out, uint64_t *p) -{ - uint64_t tmp[25U] = { 0U }; - uint64_t *x1 = p; - uint64_t *y1 = p + (uint32_t)5U; - uint64_t *z1 = p + (uint32_t)10U; - uint64_t *x3 = out; - uint64_t *y3 = out + (uint32_t)5U; - uint64_t *z3 = out + (uint32_t)10U; - uint64_t *yy = tmp; - uint64_t *zz = tmp + (uint32_t)5U; - uint64_t *bzz3 = tmp + (uint32_t)10U; - uint64_t *bzz9 = tmp + (uint32_t)15U; - uint64_t *tmp1 = tmp + (uint32_t)20U; - Hacl_K256_Field_fsqr(yy, y1); - Hacl_K256_Field_fsqr(zz, z1); - Hacl_K256_Field_fmul_small_num(x3, x1, (uint64_t)2U); - Hacl_K256_Field_fmul(x3, x3, y1); - Hacl_K256_Field_fmul(tmp1, yy, y1); - Hacl_K256_Field_fmul(z3, tmp1, z1); - Hacl_K256_Field_fmul_small_num(z3, z3, (uint64_t)8U); - Hacl_K256_Field_fnormalize_weak(z3, z3); - Hacl_K256_Field_fmul_small_num(bzz3, zz, (uint64_t)21U); - Hacl_K256_Field_fnormalize_weak(bzz3, bzz3); - Hacl_K256_Field_fmul_small_num(bzz9, bzz3, (uint64_t)3U); - Hacl_K256_Field_fsub(bzz9, yy, bzz9, (uint64_t)6U); - Hacl_K256_Field_fadd(tmp1, yy, bzz3); - Hacl_K256_Field_fmul(tmp1, bzz9, tmp1); - Hacl_K256_Field_fmul(y3, yy, zz); - Hacl_K256_Field_fmul(x3, x3, bzz9); - Hacl_K256_Field_fmul_small_num(y3, y3, (uint64_t)168U); - Hacl_K256_Field_fadd(y3, tmp1, y3); - Hacl_K256_Field_fnormalize_weak(y3, y3); -} - -void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q) -{ - uint64_t tmp[45U] = { 0U }; - uint64_t *x1 = p; - uint64_t *y1 = p + (uint32_t)5U; - uint64_t *z1 = p + (uint32_t)10U; - uint64_t *x2 = q; - uint64_t *y2 = q + (uint32_t)5U; - uint64_t *z2 = q + (uint32_t)10U; - uint64_t *x3 = out; - uint64_t *y3 = out + (uint32_t)5U; - uint64_t *z3 = out + (uint32_t)10U; - uint64_t *xx = tmp; - uint64_t *yy = tmp + (uint32_t)5U; - uint64_t *zz = tmp + (uint32_t)10U; - uint64_t *xy_pairs = tmp + (uint32_t)15U; - uint64_t *yz_pairs = tmp + (uint32_t)20U; - 
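/* Descriptive note: tmp, declared above as 45 limbs, is carved into nine
   field elements of five 52-bit limbs each.  What follows are complete
   (exception-free) projective addition formulas for a short Weierstrass
   curve with a = 0, in the style of Renes-Costello-Batina; the recurring
   small constant 21 is 3*b for secp256k1's b = 7. */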
uint64_t *xz_pairs = tmp + (uint32_t)25U; - uint64_t *yy_m_bzz3 = tmp + (uint32_t)30U; - uint64_t *yy_p_bzz3 = tmp + (uint32_t)35U; - uint64_t *tmp1 = tmp + (uint32_t)40U; - Hacl_K256_Field_fmul(xx, x1, x2); - Hacl_K256_Field_fmul(yy, y1, y2); - Hacl_K256_Field_fmul(zz, z1, z2); - Hacl_K256_Field_fadd(xy_pairs, x1, y1); - Hacl_K256_Field_fadd(tmp1, x2, y2); - Hacl_K256_Field_fmul(xy_pairs, xy_pairs, tmp1); - Hacl_K256_Field_fadd(tmp1, xx, yy); - Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, (uint64_t)4U); - Hacl_K256_Field_fadd(yz_pairs, y1, z1); - Hacl_K256_Field_fadd(tmp1, y2, z2); - Hacl_K256_Field_fmul(yz_pairs, yz_pairs, tmp1); - Hacl_K256_Field_fadd(tmp1, yy, zz); - Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, (uint64_t)4U); - Hacl_K256_Field_fadd(xz_pairs, x1, z1); - Hacl_K256_Field_fadd(tmp1, x2, z2); - Hacl_K256_Field_fmul(xz_pairs, xz_pairs, tmp1); - Hacl_K256_Field_fadd(tmp1, xx, zz); - Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, (uint64_t)4U); - Hacl_K256_Field_fmul_small_num(tmp1, zz, (uint64_t)21U); - Hacl_K256_Field_fnormalize_weak(tmp1, tmp1); - Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, (uint64_t)2U); - Hacl_K256_Field_fadd(yy_p_bzz3, yy, tmp1); - Hacl_K256_Field_fmul_small_num(x3, yz_pairs, (uint64_t)21U); - Hacl_K256_Field_fnormalize_weak(x3, x3); - Hacl_K256_Field_fmul_small_num(z3, xx, (uint64_t)3U); - Hacl_K256_Field_fmul_small_num(y3, z3, (uint64_t)21U); - Hacl_K256_Field_fnormalize_weak(y3, y3); - Hacl_K256_Field_fmul(tmp1, xy_pairs, yy_m_bzz3); - Hacl_K256_Field_fmul(x3, x3, xz_pairs); - Hacl_K256_Field_fsub(x3, tmp1, x3, (uint64_t)2U); - Hacl_K256_Field_fnormalize_weak(x3, x3); - Hacl_K256_Field_fmul(tmp1, yy_p_bzz3, yy_m_bzz3); - Hacl_K256_Field_fmul(y3, y3, xz_pairs); - Hacl_K256_Field_fadd(y3, tmp1, y3); - Hacl_K256_Field_fnormalize_weak(y3, y3); - Hacl_K256_Field_fmul(tmp1, yz_pairs, yy_p_bzz3); - Hacl_K256_Field_fmul(z3, z3, xy_pairs); - Hacl_K256_Field_fadd(z3, tmp1, z3); - Hacl_K256_Field_fnormalize_weak(z3, z3); -} - -void Hacl_Impl_K256_PointMul_make_point_at_inf(uint64_t *p) -{ - uint64_t *px = p; - uint64_t *py = p + (uint32_t)5U; - uint64_t *pz = p + (uint32_t)10U; - memset(px, 0U, (uint32_t)5U * sizeof (uint64_t)); - memset(py, 0U, (uint32_t)5U * sizeof (uint64_t)); - py[0U] = (uint64_t)1U; - memset(pz, 0U, (uint32_t)5U * sizeof (uint64_t)); -} - -void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t *q) -{ - uint64_t table[240U] = { 0U }; - uint64_t tmp[15U] = { 0U }; - uint64_t *t0 = table; - uint64_t *t1 = table + (uint32_t)15U; - Hacl_Impl_K256_PointMul_make_point_at_inf(t0); - memcpy(t1, q, (uint32_t)15U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)15U; - Hacl_Impl_K256_PointDouble_point_double(tmp, t11); - memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U, - tmp, - (uint32_t)15U * sizeof (uint64_t)); - uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U; - Hacl_Impl_K256_PointAdd_point_add(tmp, q, t2); - memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U, - tmp, - (uint32_t)15U * sizeof (uint64_t));); - Hacl_Impl_K256_PointMul_make_point_at_inf(out); - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++) - { - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - Hacl_Impl_K256_PointDouble_point_double(out, out);); - { - uint32_t bk = (uint32_t)256U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk - (uint32_t)4U * i0 - 
(uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk - (uint32_t)4U * i0 - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = scalar[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < (uint32_t)4U && (uint32_t)0U < j) - { - ite = p1 | scalar[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l = ite & mask_l; - uint64_t a_bits_l[15U] = { 0U }; - memcpy(a_bits_l, table, (uint32_t)15U * sizeof (uint64_t)); - KRML_MAYBE_FOR15(i2, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i2 + (uint32_t)1U)); - uint64_t *res_j = table + (i2 + (uint32_t)1U) * (uint32_t)15U; - KRML_MAYBE_FOR15(i, - (uint32_t)0U, - (uint32_t)15U, - (uint32_t)1U, - uint64_t *os = a_bits_l; - uint64_t x = (c & res_j[i]) | (~c & a_bits_l[i]); - os[i] = x;);); - Hacl_Impl_K256_PointAdd_point_add(out, out, a_bits_l); - } - } - } - } -} - -static inline void -point_mul_double_vartime( - uint64_t *out, - uint64_t *scalar1, - uint64_t *q1, - uint64_t *scalar2, - uint64_t *q2 -) -{ - uint64_t table1[240U] = { 0U }; - uint64_t tmp0[15U] = { 0U }; - uint64_t *t00 = table1; - uint64_t *t10 = table1 + (uint32_t)15U; - Hacl_Impl_K256_PointMul_make_point_at_inf(t00); - memcpy(t10, q1, (uint32_t)15U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table1 + (i + (uint32_t)1U) * (uint32_t)15U; - Hacl_Impl_K256_PointDouble_point_double(tmp0, t11); - memcpy(table1 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U, - tmp0, - (uint32_t)15U * sizeof (uint64_t)); - uint64_t *t2 = table1 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U; - Hacl_Impl_K256_PointAdd_point_add(tmp0, q1, t2); - memcpy(table1 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U, - tmp0, - (uint32_t)15U * sizeof (uint64_t));); - { - uint64_t table2[240U] = { 0U }; - uint64_t tmp[15U] = { 0U }; - uint64_t *t0 = table2; - uint64_t *t1 = table2 + (uint32_t)15U; - Hacl_Impl_K256_PointMul_make_point_at_inf(t0); - memcpy(t1, q2, (uint32_t)15U * sizeof (uint64_t)); - KRML_MAYBE_FOR7(i, - (uint32_t)0U, - (uint32_t)7U, - (uint32_t)1U, - uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U; - Hacl_Impl_K256_PointDouble_point_double(tmp, t11); - memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U, - tmp, - (uint32_t)15U * sizeof (uint64_t)); - uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U; - Hacl_Impl_K256_PointAdd_point_add(tmp, q2, t2); - memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U, - tmp, - (uint32_t)15U * sizeof (uint64_t));); - Hacl_Impl_K256_PointMul_make_point_at_inf(out); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)64U; i++) - { - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - Hacl_Impl_K256_PointDouble_point_double(out, out);); - { - uint32_t bk = (uint32_t)256U; - uint64_t mask_l0 = (uint64_t)15U; - uint32_t i10 = (bk - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j0 = (bk - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p10 = scalar2[i10] >> j0; - uint64_t ite0; - if (i10 + (uint32_t)1U < (uint32_t)4U && (uint32_t)0U < j0) - { - ite0 = p10 | scalar2[i10 + (uint32_t)1U] << ((uint32_t)64U - j0); - } - else - { - ite0 = p10; - } - { - uint64_t bits_l = ite0 & mask_l0; - uint64_t a_bits_l0[15U] = { 0U }; - uint32_t bits_l320 = (uint32_t)bits_l; - uint64_t *a_bits_l1 = table2 + bits_l320 * (uint32_t)15U; - memcpy(a_bits_l0, a_bits_l1, (uint32_t)15U * sizeof (uint64_t)); - 
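/* Descriptive note: unlike Hacl_Impl_K256_PointMul_point_mul above, which
   scans the whole precomputed table with FStar_UInt64_eq_mask so that the
   lookup is constant time, this _vartime variant indexes the table
   directly with the window value.  The scalar can therefore leak through
   the memory access pattern, which is acceptable only because this
   routine is applied to public inputs (ECDSA verification). */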
Hacl_Impl_K256_PointAdd_point_add(out, out, a_bits_l0); - { - uint32_t bk0 = (uint32_t)256U; - uint64_t mask_l = (uint64_t)15U; - uint32_t i1 = (bk0 - (uint32_t)4U * i - (uint32_t)4U) / (uint32_t)64U; - uint32_t j = (bk0 - (uint32_t)4U * i - (uint32_t)4U) % (uint32_t)64U; - uint64_t p1 = scalar1[i1] >> j; - uint64_t ite; - if (i1 + (uint32_t)1U < (uint32_t)4U && (uint32_t)0U < j) - { - ite = p1 | scalar1[i1 + (uint32_t)1U] << ((uint32_t)64U - j); - } - else - { - ite = p1; - } - { - uint64_t bits_l0 = ite & mask_l; - uint64_t a_bits_l[15U] = { 0U }; - uint32_t bits_l32 = (uint32_t)bits_l0; - uint64_t *a_bits_l10 = table1 + bits_l32 * (uint32_t)15U; - memcpy(a_bits_l, a_bits_l10, (uint32_t)15U * sizeof (uint64_t)); - Hacl_Impl_K256_PointAdd_point_add(out, out, a_bits_l); - } - } - } - } - } - } - } -} - -static inline void point_mul_g(uint64_t *out, uint64_t *scalar) -{ - uint64_t g[15U] = { 0U }; - uint64_t *gx = g; - uint64_t *gy = g + (uint32_t)5U; - uint64_t *gz = g + (uint32_t)10U; - gx[0U] = (uint64_t)0x2815b16f81798U; - gx[1U] = (uint64_t)0xdb2dce28d959fU; - gx[2U] = (uint64_t)0xe870b07029bfcU; - gx[3U] = (uint64_t)0xbbac55a06295cU; - gx[4U] = (uint64_t)0x79be667ef9dcU; - gy[0U] = (uint64_t)0x7d08ffb10d4b8U; - gy[1U] = (uint64_t)0x48a68554199c4U; - gy[2U] = (uint64_t)0xe1108a8fd17b4U; - gy[3U] = (uint64_t)0xc4655da4fbfc0U; - gy[4U] = (uint64_t)0x483ada7726a3U; - memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t)); - gz[0U] = (uint64_t)1U; - Hacl_Impl_K256_PointMul_point_mul(out, scalar, g); -} - -static inline void -point_mul_g_double_vartime(uint64_t *out, uint64_t *scalar1, uint64_t *scalar2, uint64_t *q2) -{ - uint64_t g[15U] = { 0U }; - uint64_t *gx = g; - uint64_t *gy = g + (uint32_t)5U; - uint64_t *gz = g + (uint32_t)10U; - gx[0U] = (uint64_t)0x2815b16f81798U; - gx[1U] = (uint64_t)0xdb2dce28d959fU; - gx[2U] = (uint64_t)0xe870b07029bfcU; - gx[3U] = (uint64_t)0xbbac55a06295cU; - gx[4U] = (uint64_t)0x79be667ef9dcU; - gy[0U] = (uint64_t)0x7d08ffb10d4b8U; - gy[1U] = (uint64_t)0x48a68554199c4U; - gy[2U] = (uint64_t)0xe1108a8fd17b4U; - gy[3U] = (uint64_t)0xc4655da4fbfc0U; - gy[4U] = (uint64_t)0x483ada7726a3U; - memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t)); - gz[0U] = (uint64_t)1U; - point_mul_double_vartime(out, scalar1, g, scalar2, q2); -} - -static inline bool load_public_key(uint8_t *pk, uint64_t *fpk_x, uint64_t *fpk_y) -{ - uint8_t *pk_x = pk; - uint8_t *pk_y = pk + (uint32_t)32U; - bool is_x_valid = Hacl_K256_Field_load_felem_vartime(fpk_x, pk_x); - bool is_y_valid = Hacl_K256_Field_load_felem_vartime(fpk_y, pk_y); - if (is_x_valid && is_y_valid) - { - uint64_t y2_exp[5U] = { 0U }; - uint64_t b[5U] = { 0U }; - b[0U] = (uint64_t)0x7U; - b[1U] = (uint64_t)0U; - b[2U] = (uint64_t)0U; - b[3U] = (uint64_t)0U; - b[4U] = (uint64_t)0U; - Hacl_K256_Field_fsqr(y2_exp, fpk_x); - Hacl_K256_Field_fmul(y2_exp, y2_exp, fpk_x); - Hacl_K256_Field_fadd(y2_exp, y2_exp, b); - Hacl_K256_Field_fnormalize(y2_exp, y2_exp); - { - uint64_t y2_comp[5U] = { 0U }; - Hacl_K256_Field_fsqr(y2_comp, fpk_y); - Hacl_K256_Field_fnormalize(y2_comp, y2_comp); - { - bool res = Hacl_K256_Field_is_felem_eq_vartime(y2_exp, y2_comp); - bool res0 = res; - return res0; - } - } - } - return false; -} - -static inline bool fmul_eq_vartime(uint64_t *r, uint64_t *z, uint64_t *x) -{ - uint64_t tmp[5U] = { 0U }; - bool b; - Hacl_K256_Field_fmul(tmp, r, z); - Hacl_K256_Field_fnormalize(tmp, tmp); - b = Hacl_K256_Field_is_felem_eq_vartime(tmp, x); - return b; -} - 
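Both scalar-multiplication loops above consume the 256-bit scalar as sixty-four 4-bit windows, starting from the most significant end. The window extraction is easier to read in isolation; the following is an editorial sketch (hypothetical helper name, same 4x64-bit little-endian limb layout as the generated code):

#include <stdint.h>

static inline uint64_t k256_window4(const uint64_t scalar[4U], uint32_t i)
{
  /* i = 0 .. 63 counts windows from the top of the scalar */
  uint32_t bit = (uint32_t)256U - (uint32_t)4U * i - (uint32_t)4U;
  uint32_t limb = bit / (uint32_t)64U; /* limb holding the window  */
  uint32_t off = bit % (uint32_t)64U;  /* offset of its lowest bit */
  /* The generated code additionally ORs in bits of the next limb for
     windows that straddle a limb boundary; with 4-bit windows on 64-bit
     limbs (64 % 4 == 0) those bits can never survive the & 0xF mask, so
     the sketch drops that case. */
  return (scalar[limb] >> off) & (uint64_t)0xFU;
}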
-/******************************************************************************* - Verified C library for ECDSA signing and verification on the secp256k1 curve. - - For the comments on low-S normalization (or canonical lowest S value), see: - • https://en.bitcoin.it/wiki/BIP_0062 - • https://yondon.blog/2019/01/01/how-not-to-use-ecdsa/ - • https://eklitzke.org/bitcoin-transaction-malleability - - For example, bitcoin-core/secp256k1 *always* performs low-S normalization. - -*******************************************************************************/ - - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The arguments `msgHash`, `private_key`, and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function DOESN'T perform low-S normalization, see `secp256k1_ecdsa_sign_hashed_msg` if needed. - - The function also checks whether `private_key` and `nonce` are valid values: - • 0 < `private_key` and `private_key` < the order of the curve - • 0 < `nonce` and `nonce` < the order of the curve -*/ -bool -Hacl_K256_ECDSA_ecdsa_sign_hashed_msg( - uint8_t *signature, - uint8_t *msgHash, - uint8_t *private_key, - uint8_t *nonce -) -{ - uint64_t r_q[4U] = { 0U }; - uint64_t s_q[4U] = { 0U }; - uint64_t d_a[4U] = { 0U }; - uint64_t k_q[4U] = { 0U }; - uint64_t is_sk_valid = load_qelem_check(d_a, private_key); - uint64_t is_nonce_valid = load_qelem_check(k_q, nonce); - uint64_t are_sk_nonce_valid = is_sk_valid & is_nonce_valid; - if (are_sk_nonce_valid == (uint64_t)0U) - { - return false; - } - { - uint64_t tmp[5U] = { 0U }; - uint8_t x_bytes[32U] = { 0U }; - uint64_t p[15U] = { 0U }; - point_mul_g(p, k_q); - { - uint64_t *x = p; - uint64_t *z = p + (uint32_t)10U; - Hacl_Impl_K256_Finv_finv(tmp, z); - Hacl_K256_Field_fmul(tmp, x, tmp); - Hacl_K256_Field_fnormalize(tmp, tmp); - Hacl_K256_Field_store_felem(x_bytes, tmp); - load_qelem_modq(r_q, x_bytes); - { - uint64_t z0[4U] = { 0U }; - uint64_t kinv[4U] = { 0U }; - load_qelem_modq(z0, msgHash); - qinv(kinv, k_q); - qmul(s_q, r_q, d_a); - qadd(s_q, z0, s_q); - qmul(s_q, kinv, s_q); - store_qelem(signature, r_q); - store_qelem(signature + (uint32_t)32U, s_q); - { - uint64_t is_r_zero = is_qelem_zero(r_q); - uint64_t is_s_zero = is_qelem_zero(s_q); - if - (is_r_zero == (uint64_t)0xFFFFFFFFFFFFFFFFU || is_s_zero == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - return false; - } - return true; - } - } - } - } -} - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The arguments `private_key` and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function first hashes a message `msg` with SHA2-256 and then calls `ecdsa_sign_hashed_msg`. - - The function DOESN'T perform low-S normalization, see `secp256k1_ecdsa_sign_sha256` if needed. 
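   (Concretely, `ecdsa_sign_hashed_msg` above computes r = (k*G).x mod q
   and s = k^-1 * (z + r*d) mod q, where z is the message hash reduced
   mod q, d the private key and k the nonce, and reports failure when
   r = 0 or s = 0.)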
-*/ -bool -Hacl_K256_ECDSA_ecdsa_sign_sha256( - uint8_t *signature, - uint32_t msg_len, - uint8_t *msg, - uint8_t *private_key, - uint8_t *nonce -) -{ - uint8_t msgHash[32U] = { 0U }; - bool b; - Hacl_Hash_SHA2_hash_256(msg, msg_len, msgHash); - b = Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce); - return b; -} - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msgHash` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function ACCEPTS non low-S normalized signatures, see `secp256k1_ecdsa_verify_hashed_msg` if needed. - - The function also checks whether a public key (x || y) is valid: - • 0 < x and x < prime - • 0 < y and y < prime - • (x, y) is on the curve -*/ -bool -Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t *signature) -{ - uint64_t pk_x[5U] = { 0U }; - uint64_t pk_y[5U] = { 0U }; - uint64_t r_q[4U] = { 0U }; - uint64_t s_q[4U] = { 0U }; - uint64_t z[4U] = { 0U }; - bool is_xy_on_curve = load_public_key(public_key, pk_x, pk_y); - bool is_r_valid = load_qelem_vartime(r_q, signature); - bool is_s_valid = load_qelem_vartime(s_q, signature + (uint32_t)32U); - bool res0; - load_qelem_modq(z, m); - if (!(is_xy_on_curve && is_r_valid && is_s_valid)) - { - res0 = false; - } - else - { - uint64_t p[15U] = { 0U }; - uint64_t res[15U] = { 0U }; - uint64_t *x1 = p; - uint64_t *y1 = p + (uint32_t)5U; - uint64_t *z10 = p + (uint32_t)10U; - memcpy(x1, pk_x, (uint32_t)5U * sizeof (uint64_t)); - memcpy(y1, pk_y, (uint32_t)5U * sizeof (uint64_t)); - memset(z10, 0U, (uint32_t)5U * sizeof (uint64_t)); - z10[0U] = (uint64_t)1U; - { - uint64_t sinv[4U] = { 0U }; - uint64_t u1[4U] = { 0U }; - uint64_t u2[4U] = { 0U }; - qinv(sinv, s_q); - qmul(u1, z, sinv); - qmul(u2, r_q, sinv); - point_mul_g_double_vartime(res, u1, u2, p); - { - uint64_t tmp[5U] = { 0U }; - uint64_t *pz = res + (uint32_t)10U; - Hacl_K256_Field_fnormalize(tmp, pz); - { - bool b0 = Hacl_K256_Field_is_felem_zero_vartime(tmp); - bool b; - if (b0) - { - b = false; - } - else - { - uint64_t *x = res; - uint64_t *z1 = res + (uint32_t)10U; - uint8_t r_bytes[32U] = { 0U }; - uint64_t r_fe[5U] = { 0U }; - uint64_t tmp_q[5U] = { 0U }; - uint64_t tmp_x[5U] = { 0U }; - store_qelem(r_bytes, r_q); - Hacl_K256_Field_load_felem(r_fe, r_bytes); - Hacl_K256_Field_fnormalize(tmp_x, x); - { - bool is_rz_x = fmul_eq_vartime(r_fe, z1, tmp_x); - bool res1; - if (!is_rz_x) - { - bool is_r_lt_p_m_q = Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(r_fe); - if (is_r_lt_p_m_q) - { - tmp_q[0U] = (uint64_t)0x25e8cd0364141U; - tmp_q[1U] = (uint64_t)0xe6af48a03bbfdU; - tmp_q[2U] = (uint64_t)0xffffffebaaedcU; - tmp_q[3U] = (uint64_t)0xfffffffffffffU; - tmp_q[4U] = (uint64_t)0xffffffffffffU; - Hacl_K256_Field_fadd(tmp_q, r_fe, tmp_q); - res1 = fmul_eq_vartime(tmp_q, z1, tmp_x); - } - else - { - res1 = false; - } - } - else - { - res1 = true; - } - b = res1; - } - } - res0 = b; - } - } - } - } - return res0; -} - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. 
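A round-trip usage sketch for the sign/verify pair (editorial; the wrapper name is hypothetical, and deriving a fresh secret nonce, e.g. per RFC 6979, is left to the caller by this API):

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_K256_ECDSA.h"

/* private_key and nonce are 32-byte big-endian scalars in (0, q);
   public_key is the 64-byte x || y form. */
static bool sign_verify_roundtrip(uint8_t *msg, uint32_t msg_len,
                                  uint8_t *private_key, uint8_t *nonce,
                                  uint8_t *public_key)
{
  uint8_t signature[64U] = { 0U }; /* R || S */
  if (!Hacl_K256_ECDSA_ecdsa_sign_sha256(signature, msg_len, msg,
                                         private_key, nonce))
  {
    return false; /* invalid key or nonce, or r == 0 or s == 0 */
  }
  return Hacl_K256_ECDSA_ecdsa_verify_sha256(msg_len, msg, public_key, signature);
}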
- - The function first hashes a message `msg` with SHA2-256 and then calls `ecdsa_verify_hashed_msg`. - - The function ACCEPTS non low-S normalized signatures, see `secp256k1_ecdsa_verify_sha256` if needed. -*/ -bool -Hacl_K256_ECDSA_ecdsa_verify_sha256( - uint32_t msg_len, - uint8_t *msg, - uint8_t *public_key, - uint8_t *signature -) -{ - uint8_t mHash[32U] = { 0U }; - bool b; - Hacl_Hash_SHA2_hash_256(msg, msg_len, mHash); - b = Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(mHash, public_key, signature); - return b; -} - -/** -Compute canonical lowest S value for `signature` (R || S). - - The function returns `true` for successful normalization of S and `false` otherwise. - - The argument `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. -*/ -bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature) -{ - uint64_t s_q[4U] = { 0U }; - uint8_t *s = signature + (uint32_t)32U; - bool is_sk_valid = load_qelem_vartime(s_q, s); - if (!is_sk_valid) - { - return false; - } - { - bool is_sk_lt_q_halved = is_qelem_le_q_halved_vartime(s_q); - qnegate_conditional_vartime(s_q, !is_sk_lt_q_halved); - store_qelem(signature + (uint32_t)32U, s_q); - return true; - } -} - -/** -Check whether `signature` (R || S) is in canonical form. - - The function returns `true` if S is low-S normalized and `false` otherwise. - - The argument `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. -*/ -bool Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(uint8_t *signature) -{ - uint64_t s_q[4U] = { 0U }; - uint8_t *s = signature + (uint32_t)32U; - bool is_s_valid = load_qelem_vartime(s_q, s); - bool is_s_lt_q_halved = is_qelem_le_q_halved_vartime(s_q); - return is_s_valid && is_s_lt_q_halved; -} - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The arguments `msgHash`, `private_key`, and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function ALWAYS performs low-S normalization, see `ecdsa_sign_hashed_msg` if needed. - - The function also checks whether `private_key` and `nonce` are valid values: - • 0 < `private_key` and `private_key` < the order of the curve - • 0 < `nonce` and `nonce` < the order of the curve -*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_sign_hashed_msg( - uint8_t *signature, - uint8_t *msgHash, - uint8_t *private_key, - uint8_t *nonce -) -{ - bool b = Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce); - if (b) - { - return Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(signature); - } - return false; -} - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The arguments `private_key` and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function first hashes a message `msg` with SHA2-256 and then calls `secp256k1_ecdsa_sign_hashed_msg`. - - The function ALWAYS performs low-S normalization, see `ecdsa_sign_hashed_msg` if needed. 
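The two normalization helpers compose with the strict verifier declared below; a sketch of a caller that tolerates high-S signatures by normalizing in place (hypothetical wrapper name, same includes as the previous sketch):

static bool verify_allowing_high_s(uint32_t msg_len, uint8_t *msg,
                                   uint8_t *public_key, uint8_t *signature)
{
  if (!Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(signature))
  {
    /* rewrites a high S to q - S; fails only if S is not in (0, q) */
    if (!Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(signature))
    {
      return false;
    }
  }
  return Hacl_K256_ECDSA_secp256k1_ecdsa_verify_sha256(msg_len, msg,
                                                       public_key, signature);
}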
-*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_sign_sha256( - uint8_t *signature, - uint32_t msg_len, - uint8_t *msg, - uint8_t *private_key, - uint8_t *nonce -) -{ - uint8_t msgHash[32U] = { 0U }; - bool b; - Hacl_Hash_SHA2_hash_256(msg, msg_len, msgHash); - b = Hacl_K256_ECDSA_secp256k1_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce); - return b; -} - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msgHash` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function DOESN'T accept non low-S normalized signatures, see `ecdsa_verify_hashed_msg` if needed. - - The function also checks whether a public key (x || y) is valid: - • 0 < x and x < prime - • 0 < y and y < prime - • (x, y) is on the curve -*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_verify_hashed_msg( - uint8_t *msgHash, - uint8_t *public_key, - uint8_t *signature -) -{ - bool is_s_normalized = Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(signature); - if (!is_s_normalized) - { - return false; - } - return Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(msgHash, public_key, signature); -} - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function first hashes a message `msg` with SHA2-256 and then calls `secp256k1_ecdsa_verify_hashed_msg`. - - The function DOESN'T accept non low-S normalized signatures, see `ecdsa_verify_sha256` if needed. -*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_verify_sha256( - uint32_t msg_len, - uint8_t *msg, - uint8_t *public_key, - uint8_t *signature -) -{ - uint8_t mHash[32U] = { 0U }; - bool b; - Hacl_Hash_SHA2_hash_256(msg, msg_len, mHash); - b = Hacl_K256_ECDSA_secp256k1_ecdsa_verify_hashed_msg(mHash, public_key, signature); - return b; -} - -/******************************************************************************* - Parsing and Serializing public keys. - - A public key is a point (x, y) on the secp256k1 curve. - - The point can be represented in the following three ways. - • raw = [ x || y ], 64 bytes - • uncompressed = [ 0x04 || x || y ], 65 bytes - • compressed = [ (0x02 for even `y` and 0x03 for odd `y`) || x ], 33 bytes - -*******************************************************************************/ - - -/** -Convert a public key from uncompressed to its raw form. - - The function returns `true` for successful conversion of a public key and `false` otherwise. - - The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. - - The function DOESN'T check whether (x, y) is valid point. -*/ -bool Hacl_K256_ECDSA_public_key_uncompressed_to_raw(uint8_t *pk_raw, uint8_t *pk) -{ - uint8_t pk0 = pk[0U]; - if (pk0 != (uint8_t)0x04U) - { - return false; - } - memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t)); - return true; -} - -/** -Convert a public key from raw to its uncompressed form. - - The outparam `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. - The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. 
- - The function DOESN'T check whether (x, y) is valid point. -*/ -void Hacl_K256_ECDSA_public_key_uncompressed_from_raw(uint8_t *pk, uint8_t *pk_raw) -{ - pk[0U] = (uint8_t)0x04U; - memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t)); -} - -/** -Convert a public key from compressed to its raw form. - - The function returns `true` for successful conversion of a public key and `false` otherwise. - - The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. - - The function also checks whether (x, y) is valid point. -*/ -bool Hacl_K256_ECDSA_public_key_compressed_to_raw(uint8_t *pk_raw, uint8_t *pk) -{ - uint64_t xa[5U] = { 0U }; - uint64_t ya[5U] = { 0U }; - uint8_t *pk_xb = pk + (uint32_t)1U; - bool b = Hacl_Impl_K256_Point_aff_point_decompress_vartime(xa, ya, pk); - if (b) - { - memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t)); - Hacl_K256_Field_store_felem(pk_raw + (uint32_t)32U, ya); - } - return b; -} - -/** -Convert a public key from raw to its compressed form. - - The outparam `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. - The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function DOESN'T check whether (x, y) is valid point. -*/ -void Hacl_K256_ECDSA_public_key_compressed_from_raw(uint8_t *pk, uint8_t *pk_raw) -{ - uint8_t *pk_x = pk_raw; - uint8_t *pk_y = pk_raw + (uint32_t)32U; - uint8_t x0 = pk_y[31U]; - bool is_pk_y_odd = (x0 & (uint8_t)1U) == (uint8_t)1U; - uint8_t ite; - if (is_pk_y_odd) - { - ite = (uint8_t)0x03U; - } - else - { - ite = (uint8_t)0x02U; - } - pk[0U] = ite; - memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t)); -} - diff --git a/dist/c89-compatible/Hacl_K256_ECDSA.h b/dist/c89-compatible/Hacl_K256_ECDSA.h deleted file mode 100644 index c7882dad7c..0000000000 --- a/dist/c89-compatible/Hacl_K256_ECDSA.h +++ /dev/null @@ -1,305 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
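Chaining the serializers defined at the end of Hacl_K256_ECDSA.c above (editorial sketch, hypothetical name): parse a 33-byte compressed key, which validates the point, then re-emit it in the 65-byte uncompressed form.

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_K256_ECDSA.h"

static bool recompress_pubkey(uint8_t *uncompressed /* 65 bytes */,
                              uint8_t *compressed /* 33 bytes */)
{
  uint8_t raw[64U] = { 0U }; /* x || y */
  if (!Hacl_K256_ECDSA_public_key_compressed_to_raw(raw, compressed))
  {
    return false; /* prefix not 0x02/0x03, invalid x, or no square root */
  }
  Hacl_K256_ECDSA_public_key_uncompressed_from_raw(uncompressed, raw);
  return true;
}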
- */ - - -#ifndef __Hacl_K256_ECDSA_H -#define __Hacl_K256_ECDSA_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Hash_SHA2.h" -#include "Hacl_Bignum_K256.h" -#include "Hacl_Bignum_Base.h" -#include "evercrypt_targetconfig.h" -#include "lib_intrinsics.h" -/******************************************************************************* - Verified C library for ECDSA signing and verification on the secp256k1 curve. - - For the comments on low-S normalization (or canonical lowest S value), see: - • https://en.bitcoin.it/wiki/BIP_0062 - • https://yondon.blog/2019/01/01/how-not-to-use-ecdsa/ - • https://eklitzke.org/bitcoin-transaction-malleability - - For example, bitcoin-core/secp256k1 *always* performs low-S normalization. - -*******************************************************************************/ - - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The arguments `msgHash`, `private_key`, and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function DOESN'T perform low-S normalization, see `secp256k1_ecdsa_sign_hashed_msg` if needed. - - The function also checks whether `private_key` and `nonce` are valid values: - • 0 < `private_key` and `private_key` < the order of the curve - • 0 < `nonce` and `nonce` < the order of the curve -*/ -bool -Hacl_K256_ECDSA_ecdsa_sign_hashed_msg( - uint8_t *signature, - uint8_t *msgHash, - uint8_t *private_key, - uint8_t *nonce -); - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The arguments `private_key` and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function first hashes a message `msg` with SHA2-256 and then calls `ecdsa_sign_hashed_msg`. - - The function DOESN'T perform low-S normalization, see `secp256k1_ecdsa_sign_sha256` if needed. -*/ -bool -Hacl_K256_ECDSA_ecdsa_sign_sha256( - uint8_t *signature, - uint32_t msg_len, - uint8_t *msg, - uint8_t *private_key, - uint8_t *nonce -); - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msgHash` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function ACCEPTS non low-S normalized signatures, see `secp256k1_ecdsa_verify_hashed_msg` if needed. - - The function also checks whether a public key (x || y) is valid: - • 0 < x and x < prime - • 0 < y and y < prime - • (x, y) is on the curve -*/ -bool -Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t *signature); - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. 
- - The function first hashes a message `msg` with SHA2-256 and then calls `ecdsa_verify_hashed_msg`. - - The function ACCEPTS non low-S normalized signatures, see `secp256k1_ecdsa_verify_sha256` if needed. -*/ -bool -Hacl_K256_ECDSA_ecdsa_verify_sha256( - uint32_t msg_len, - uint8_t *msg, - uint8_t *public_key, - uint8_t *signature -); - -/** -Compute canonical lowest S value for `signature` (R || S). - - The function returns `true` for successful normalization of S and `false` otherwise. - - The argument `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. -*/ -bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature); - -/** -Check whether `signature` (R || S) is in canonical form. - - The function returns `true` if S is low-S normalized and `false` otherwise. - - The argument `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. -*/ -bool Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(uint8_t *signature); - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The arguments `msgHash`, `private_key`, and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function ALWAYS performs low-S normalization, see `ecdsa_sign_hashed_msg` if needed. - - The function also checks whether `private_key` and `nonce` are valid values: - • 0 < `private_key` and `private_key` < the order of the curve - • 0 < `nonce` and `nonce` < the order of the curve -*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_sign_hashed_msg( - uint8_t *signature, - uint8_t *msgHash, - uint8_t *private_key, - uint8_t *nonce -); - -/** -Create an ECDSA signature. - - The function returns `true` for successful creation of an ECDSA signature and `false` otherwise. - - The outparam `signature` (R || S) points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. - The arguments `private_key` and `nonce` point to 32 bytes of valid memory, i.e., uint8_t[32]. - - The function first hashes a message `msg` with SHA2-256 and then calls `secp256k1_ecdsa_sign_hashed_msg`. - - The function ALWAYS performs low-S normalization, see `ecdsa_sign_hashed_msg` if needed. -*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_sign_sha256( - uint8_t *signature, - uint32_t msg_len, - uint8_t *msg, - uint8_t *private_key, - uint8_t *nonce -); - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msgHash` points to 32 bytes of valid memory, i.e., uint8_t[32]. - The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function DOESN'T accept non low-S normalized signatures, see `ecdsa_verify_hashed_msg` if needed. - - The function also checks whether a public key (x || y) is valid: - • 0 < x and x < prime - • 0 < y and y < prime - • (x, y) is on the curve -*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_verify_hashed_msg( - uint8_t *msgHash, - uint8_t *public_key, - uint8_t *signature -); - -/** -Verify an ECDSA signature. - - The function returns `true` if the signature is valid and `false` otherwise. - - The argument `msg` points to `msg_len` bytes of valid memory, i.e., uint8_t[msg_len]. 
- The arguments `public_key` (x || y) and `signature` (R || S) point to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function first hashes a message `msg` with SHA2-256 and then calls `secp256k1_ecdsa_verify_hashed_msg`. - - The function DOESN'T accept non low-S normalized signatures, see `ecdsa_verify_sha256` if needed. -*/ -bool -Hacl_K256_ECDSA_secp256k1_ecdsa_verify_sha256( - uint32_t msg_len, - uint8_t *msg, - uint8_t *public_key, - uint8_t *signature -); - -/******************************************************************************* - Parsing and Serializing public keys. - - A public key is a point (x, y) on the secp256k1 curve. - - The point can be represented in the following three ways. - • raw = [ x || y ], 64 bytes - • uncompressed = [ 0x04 || x || y ], 65 bytes - • compressed = [ (0x02 for even `y` and 0x03 for odd `y`) || x ], 33 bytes - -*******************************************************************************/ - - -/** -Convert a public key from uncompressed to its raw form. - - The function returns `true` for successful conversion of a public key and `false` otherwise. - - The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. - - The function DOESN'T check whether (x, y) is valid point. -*/ -bool Hacl_K256_ECDSA_public_key_uncompressed_to_raw(uint8_t *pk_raw, uint8_t *pk); - -/** -Convert a public key from raw to its uncompressed form. - - The outparam `pk` points to 65 bytes of valid memory, i.e., uint8_t[65]. - The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function DOESN'T check whether (x, y) is valid point. -*/ -void Hacl_K256_ECDSA_public_key_uncompressed_from_raw(uint8_t *pk, uint8_t *pk_raw); - -/** -Convert a public key from compressed to its raw form. - - The function returns `true` for successful conversion of a public key and `false` otherwise. - - The outparam `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. - The argument `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. - - The function also checks whether (x, y) is valid point. -*/ -bool Hacl_K256_ECDSA_public_key_compressed_to_raw(uint8_t *pk_raw, uint8_t *pk); - -/** -Convert a public key from raw to its compressed form. - - The outparam `pk` points to 33 bytes of valid memory, i.e., uint8_t[33]. - The argument `pk_raw` points to 64 bytes of valid memory, i.e., uint8_t[64]. - - The function DOESN'T check whether (x, y) is valid point. 
-*/ -void Hacl_K256_ECDSA_public_key_compressed_from_raw(uint8_t *pk, uint8_t *pk_raw); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_K256_ECDSA_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Krmllib.h b/dist/c89-compatible/Hacl_Krmllib.h deleted file mode 100644 index 2c5cced7b6..0000000000 --- a/dist/c89-compatible/Hacl_Krmllib.h +++ /dev/null @@ -1,85 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Krmllib_H -#define __Hacl_Krmllib_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b); - -static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b); - -static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b); - -static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b); - -static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b); - -static inline uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b); - -static inline FStar_UInt128_uint128 -FStar_UInt128_add(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b); - -static inline FStar_UInt128_uint128 -FStar_UInt128_add_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b); - -static inline FStar_UInt128_uint128 -FStar_UInt128_sub_mod(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b); - -static inline FStar_UInt128_uint128 -FStar_UInt128_logor(FStar_UInt128_uint128 a, FStar_UInt128_uint128 b); - -static inline FStar_UInt128_uint128 -FStar_UInt128_shift_left(FStar_UInt128_uint128 a, uint32_t s); - -static inline FStar_UInt128_uint128 -FStar_UInt128_shift_right(FStar_UInt128_uint128 a, uint32_t s); - -static inline FStar_UInt128_uint128 FStar_UInt128_uint64_to_uint128(uint64_t a); - -static inline uint64_t FStar_UInt128_uint128_to_uint64(FStar_UInt128_uint128 a); - -static inline FStar_UInt128_uint128 FStar_UInt128_mul_wide(uint64_t x, uint64_t y); - -static inline void store128_be(uint8_t *x0, FStar_UInt128_uint128 x1); - -static inline FStar_UInt128_uint128 load128_be(uint8_t *x0); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Krmllib_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_NaCl.c b/dist/c89-compatible/Hacl_NaCl.c deleted file mode 100644 index 73d2f23559..0000000000 --- 
a/dist/c89-compatible/Hacl_NaCl.c +++ /dev/null @@ -1,442 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_NaCl.h" - - - -static void secretbox_init(uint8_t *xkeys, uint8_t *k, uint8_t *n) -{ - uint8_t *subkey = xkeys; - uint8_t *aekey = xkeys + (uint32_t)32U; - uint8_t *n0 = n; - uint8_t *n1 = n + (uint32_t)16U; - Hacl_Salsa20_hsalsa20(subkey, k, n0); - Hacl_Salsa20_salsa20_key_block0(aekey, subkey, n1); -} - -static void -secretbox_detached(uint32_t mlen, uint8_t *c, uint8_t *tag, uint8_t *k, uint8_t *n, uint8_t *m) -{ - uint8_t xkeys[96U] = { 0U }; - uint8_t *mkey; - secretbox_init(xkeys, k, n); - mkey = xkeys + (uint32_t)32U; - { - uint8_t *n1 = n + (uint32_t)16U; - uint8_t *subkey = xkeys; - uint8_t *ekey0 = xkeys + (uint32_t)64U; - uint32_t mlen0; - if (mlen <= (uint32_t)32U) - { - mlen0 = mlen; - } - else - { - mlen0 = (uint32_t)32U; - } - { - uint32_t mlen1 = mlen - mlen0; - uint8_t *m0 = m; - uint8_t *m1 = m + mlen0; - uint8_t block0[32U] = { 0U }; - uint8_t *c0; - uint8_t *c1; - memcpy(block0, m0, mlen0 * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t *os = block0; - uint8_t x = block0[i] ^ ekey0[i]; - os[i] = x; - } - } - c0 = c; - c1 = c + mlen0; - memcpy(c0, block0, mlen0 * sizeof (uint8_t)); - Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, (uint32_t)1U); - Hacl_Poly1305_32_poly1305_mac(tag, mlen, c, mkey); - } - } -} - -static uint32_t -secretbox_open_detached( - uint32_t mlen, - uint8_t *m, - uint8_t *k, - uint8_t *n, - uint8_t *c, - uint8_t *tag -) -{ - uint8_t xkeys[96U] = { 0U }; - uint8_t *mkey; - secretbox_init(xkeys, k, n); - mkey = xkeys + (uint32_t)32U; - { - uint8_t tag_[16U] = { 0U }; - Hacl_Poly1305_32_poly1305_mac(tag_, mlen, c, mkey); - { - uint8_t res0 = (uint8_t)255U; - uint8_t z; - uint32_t res; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint8_t uu____0 = FStar_UInt8_eq_mask(tag[i], tag_[i]); - res0 = uu____0 & res0;); - z = res0; - if (z == (uint8_t)255U) - { - uint8_t *subkey = xkeys; - uint8_t *ekey0 = xkeys + (uint32_t)64U; - uint8_t *n1 = n + (uint32_t)16U; - uint32_t mlen0; - if (mlen <= (uint32_t)32U) - { - mlen0 = mlen; - } - else - { - mlen0 = (uint32_t)32U; - } - { - uint32_t mlen1 = mlen - mlen0; - uint8_t *c0 = c; - uint8_t *c1 = c + mlen0; - uint8_t block0[32U] = { 0U }; - memcpy(block0, c0, mlen0 * sizeof 
(uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)32U; i++) - { - uint8_t *os = block0; - uint8_t x = block0[i] ^ ekey0[i]; - os[i] = x; - } - } - { - uint8_t *m0 = m; - uint8_t *m1 = m + mlen0; - memcpy(m0, block0, mlen0 * sizeof (uint8_t)); - Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, (uint32_t)1U); - res = (uint32_t)0U; - } - } - } - else - { - res = (uint32_t)0xffffffffU; - } - return res; - } - } -} - -static void secretbox_easy(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m) -{ - uint8_t *tag = c; - uint8_t *cip = c + (uint32_t)16U; - secretbox_detached(mlen, cip, tag, k, n, m); -} - -static uint32_t -secretbox_open_easy(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c) -{ - uint8_t *tag = c; - uint8_t *cip = c + (uint32_t)16U; - return secretbox_open_detached(mlen, m, k, n, cip, tag); -} - -static inline uint32_t box_beforenm(uint8_t *k, uint8_t *pk, uint8_t *sk) -{ - uint8_t n0[16U] = { 0U }; - bool r = Hacl_Curve25519_51_ecdh(k, sk, pk); - if (r) - { - Hacl_Salsa20_hsalsa20(k, k, n0); - return (uint32_t)0U; - } - return (uint32_t)0xffffffffU; -} - -static inline uint32_t -box_detached_afternm( - uint32_t mlen, - uint8_t *c, - uint8_t *tag, - uint8_t *k, - uint8_t *n, - uint8_t *m -) -{ - secretbox_detached(mlen, c, tag, k, n, m); - return (uint32_t)0U; -} - -static inline uint32_t -box_detached( - uint32_t mlen, - uint8_t *c, - uint8_t *tag, - uint8_t *sk, - uint8_t *pk, - uint8_t *n, - uint8_t *m -) -{ - uint8_t k[32U] = { 0U }; - uint32_t r = box_beforenm(k, pk, sk); - if (r == (uint32_t)0U) - { - return box_detached_afternm(mlen, c, tag, k, n, m); - } - return (uint32_t)0xffffffffU; -} - -static inline uint32_t -box_open_detached_afternm( - uint32_t mlen, - uint8_t *m, - uint8_t *k, - uint8_t *n, - uint8_t *c, - uint8_t *tag -) -{ - return secretbox_open_detached(mlen, m, k, n, c, tag); -} - -static inline uint32_t -box_open_detached( - uint32_t mlen, - uint8_t *m, - uint8_t *pk, - uint8_t *sk, - uint8_t *n, - uint8_t *c, - uint8_t *tag -) -{ - uint8_t k[32U] = { 0U }; - uint32_t r = box_beforenm(k, pk, sk); - if (r == (uint32_t)0U) - { - return box_open_detached_afternm(mlen, m, k, n, c, tag); - } - return (uint32_t)0xffffffffU; -} - -static inline uint32_t -box_easy_afternm(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m) -{ - uint8_t *tag = c; - uint8_t *cip = c + (uint32_t)16U; - uint32_t res = box_detached_afternm(mlen, cip, tag, k, n, m); - return res; -} - -static inline uint32_t -box_easy(uint32_t mlen, uint8_t *c, uint8_t *sk, uint8_t *pk, uint8_t *n, uint8_t *m) -{ - uint8_t *tag = c; - uint8_t *cip = c + (uint32_t)16U; - uint32_t res = box_detached(mlen, cip, tag, sk, pk, n, m); - return res; -} - -static inline uint32_t -box_open_easy_afternm(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c) -{ - uint8_t *tag = c; - uint8_t *cip = c + (uint32_t)16U; - return box_open_detached_afternm(mlen, m, k, n, cip, tag); -} - -static inline uint32_t -box_open_easy(uint32_t mlen, uint8_t *m, uint8_t *pk, uint8_t *sk, uint8_t *n, uint8_t *c) -{ - uint8_t *tag = c; - uint8_t *cip = c + (uint32_t)16U; - return box_open_detached(mlen, m, pk, sk, n, cip, tag); -} - -uint32_t -Hacl_NaCl_crypto_secretbox_detached( - uint8_t *c, - uint8_t *tag, - uint8_t *m, - uint32_t mlen, - uint8_t *n, - uint8_t *k -) -{ - secretbox_detached(mlen, c, tag, k, n, m); - return (uint32_t)0U; -} - -uint32_t -Hacl_NaCl_crypto_secretbox_open_detached( - uint8_t *m, - uint8_t *c, - uint8_t *tag, - uint32_t mlen, - uint8_t 
*n, - uint8_t *k -) -{ - return secretbox_open_detached(mlen, m, k, n, c, tag); -} - -uint32_t -Hacl_NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint32_t mlen, uint8_t *n, uint8_t *k) -{ - secretbox_easy(mlen, c, k, n, m); - return (uint32_t)0U; -} - -uint32_t -Hacl_NaCl_crypto_secretbox_open_easy( - uint8_t *m, - uint8_t *c, - uint32_t clen, - uint8_t *n, - uint8_t *k -) -{ - return secretbox_open_easy(clen - (uint32_t)16U, m, k, n, c); -} - -uint32_t Hacl_NaCl_crypto_box_beforenm(uint8_t *k, uint8_t *pk, uint8_t *sk) -{ - return box_beforenm(k, pk, sk); -} - -uint32_t -Hacl_NaCl_crypto_box_detached_afternm( - uint8_t *c, - uint8_t *tag, - uint8_t *m, - uint32_t mlen, - uint8_t *n, - uint8_t *k -) -{ - return box_detached_afternm(mlen, c, tag, k, n, m); -} - -uint32_t -Hacl_NaCl_crypto_box_detached( - uint8_t *c, - uint8_t *tag, - uint8_t *m, - uint32_t mlen, - uint8_t *n, - uint8_t *pk, - uint8_t *sk -) -{ - return box_detached(mlen, c, tag, sk, pk, n, m); -} - -uint32_t -Hacl_NaCl_crypto_box_open_detached_afternm( - uint8_t *m, - uint8_t *c, - uint8_t *tag, - uint32_t mlen, - uint8_t *n, - uint8_t *k -) -{ - return box_open_detached_afternm(mlen, m, k, n, c, tag); -} - -uint32_t -Hacl_NaCl_crypto_box_open_detached( - uint8_t *m, - uint8_t *c, - uint8_t *tag, - uint32_t mlen, - uint8_t *n, - uint8_t *pk, - uint8_t *sk -) -{ - return box_open_detached(mlen, m, pk, sk, n, c, tag); -} - -uint32_t -Hacl_NaCl_crypto_box_easy_afternm( - uint8_t *c, - uint8_t *m, - uint32_t mlen, - uint8_t *n, - uint8_t *k -) -{ - return box_easy_afternm(mlen, c, k, n, m); -} - -uint32_t -Hacl_NaCl_crypto_box_easy( - uint8_t *c, - uint8_t *m, - uint32_t mlen, - uint8_t *n, - uint8_t *pk, - uint8_t *sk -) -{ - return box_easy(mlen, c, sk, pk, n, m); -} - -uint32_t -Hacl_NaCl_crypto_box_open_easy_afternm( - uint8_t *m, - uint8_t *c, - uint32_t clen, - uint8_t *n, - uint8_t *k -) -{ - return box_open_easy_afternm(clen - (uint32_t)16U, m, k, n, c); -} - -uint32_t -Hacl_NaCl_crypto_box_open_easy( - uint8_t *m, - uint8_t *c, - uint32_t clen, - uint8_t *n, - uint8_t *pk, - uint8_t *sk -) -{ - return box_open_easy(clen - (uint32_t)16U, m, pk, sk, n, c); -} - diff --git a/dist/c89-compatible/Hacl_NaCl.h b/dist/c89-compatible/Hacl_NaCl.h deleted file mode 100644 index 6c9bc6ff49..0000000000 --- a/dist/c89-compatible/Hacl_NaCl.h +++ /dev/null @@ -1,161 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
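A round-trip sketch for the one-shot secretbox API above (editorial; the 24-byte nonce must be unique per key, and `c` holds the 16-byte Poly1305 tag followed by the ciphertext):

#include <stdint.h>
#include "Hacl_NaCl.h"

/* returns 0 on success, 0xffffffff on authentication failure */
static uint32_t secretbox_roundtrip(uint8_t *m, uint32_t mlen,
                                    uint8_t *n /* 24 bytes */,
                                    uint8_t *k /* 32 bytes */,
                                    uint8_t *c /* mlen + 16 bytes */)
{
  if (Hacl_NaCl_crypto_secretbox_easy(c, m, mlen, n, k) != (uint32_t)0U)
  {
    return (uint32_t)0xffffffffU;
  }
  /* open_easy takes the total ciphertext length and strips the tag */
  return Hacl_NaCl_crypto_secretbox_open_easy(m, c, mlen + (uint32_t)16U, n, k);
}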
- */


-#ifndef __Hacl_NaCl_H
-#define __Hacl_NaCl_H

-#if defined(__cplusplus)
-extern "C" {
-#endif

-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"


-#include "Hacl_Salsa20.h"
-#include "Hacl_Poly1305_32.h"
-#include "Hacl_Curve25519_51.h"
-#include "evercrypt_targetconfig.h"
-uint32_t
-Hacl_NaCl_crypto_secretbox_detached(
-  uint8_t *c,
-  uint8_t *tag,
-  uint8_t *m,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *k
-);

-uint32_t
-Hacl_NaCl_crypto_secretbox_open_detached(
-  uint8_t *m,
-  uint8_t *c,
-  uint8_t *tag,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *k
-);

-uint32_t
-Hacl_NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint32_t mlen, uint8_t *n, uint8_t *k);

-uint32_t
-Hacl_NaCl_crypto_secretbox_open_easy(
-  uint8_t *m,
-  uint8_t *c,
-  uint32_t clen,
-  uint8_t *n,
-  uint8_t *k
-);

-uint32_t Hacl_NaCl_crypto_box_beforenm(uint8_t *k, uint8_t *pk, uint8_t *sk);

-uint32_t
-Hacl_NaCl_crypto_box_detached_afternm(
-  uint8_t *c,
-  uint8_t *tag,
-  uint8_t *m,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *k
-);

-uint32_t
-Hacl_NaCl_crypto_box_detached(
-  uint8_t *c,
-  uint8_t *tag,
-  uint8_t *m,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *pk,
-  uint8_t *sk
-);

-uint32_t
-Hacl_NaCl_crypto_box_open_detached_afternm(
-  uint8_t *m,
-  uint8_t *c,
-  uint8_t *tag,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *k
-);

-uint32_t
-Hacl_NaCl_crypto_box_open_detached(
-  uint8_t *m,
-  uint8_t *c,
-  uint8_t *tag,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *pk,
-  uint8_t *sk
-);

-uint32_t
-Hacl_NaCl_crypto_box_easy_afternm(
-  uint8_t *c,
-  uint8_t *m,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *k
-);

-uint32_t
-Hacl_NaCl_crypto_box_easy(
-  uint8_t *c,
-  uint8_t *m,
-  uint32_t mlen,
-  uint8_t *n,
-  uint8_t *pk,
-  uint8_t *sk
-);

-uint32_t
-Hacl_NaCl_crypto_box_open_easy_afternm(
-  uint8_t *m,
-  uint8_t *c,
-  uint32_t clen,
-  uint8_t *n,
-  uint8_t *k
-);

-uint32_t
-Hacl_NaCl_crypto_box_open_easy(
-  uint8_t *m,
-  uint8_t *c,
-  uint32_t clen,
-  uint8_t *n,
-  uint8_t *pk,
-  uint8_t *sk
-);

-#if defined(__cplusplus)
-}
-#endif

-#define __Hacl_NaCl_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_P256.c b/dist/c89-compatible/Hacl_P256.c
deleted file mode 100644
index 152ef4f990..0000000000
--- a/dist/c89-compatible/Hacl_P256.c
+++ /dev/null
@@ -1,3245 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
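The header above fixes the public NaCl surface of this file pair. Note the calling convention: the exported wrappers take (output, input, length, nonce, key) while the internal routines they delegate to take the length first, and the easy variants carry a 16-byte authenticator in the ciphertext, which is why the open_easy wrappers pass clen - (uint32_t)16U down. A minimal secretbox round trip against these declarations; the all-zero key and nonce and the buffer sizes are illustrative assumptions, not values from the source, and a real caller must use a randomly generated key and a unique nonce:

#include <stdint.h>
#include <string.h>
#include "Hacl_NaCl.h"

int secretbox_roundtrip_sketch(void)
{
  uint8_t key[32U] = { 0U };   /* 32-byte secretbox key (placeholder) */
  uint8_t nonce[24U] = { 0U }; /* 24-byte nonce (placeholder) */
  uint8_t msg[16U] = "attack at dawn!";
  uint8_t ct[32U];             /* mlen + 16 bytes: message plus authenticator */
  uint8_t out[16U];
  if (Hacl_NaCl_crypto_secretbox_easy(ct, msg, (uint32_t)16U, nonce, key) != 0U)
    return 1;
  /* clen includes the 16-byte tag; 0U is the NaCl success convention */
  if (Hacl_NaCl_crypto_secretbox_open_easy(out, ct, (uint32_t)32U, nonce, key) != 0U)
    return 1;
  return memcmp(msg, out, 16U) != 0;
}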
- */ - - -#include "internal/Hacl_P256.h" - -#include "internal/Hacl_Spec.h" -#include "lib_intrinsics.h" -static uint64_t isZero_uint64_CT(uint64_t *f) -{ - uint64_t a0 = f[0U]; - uint64_t a1 = f[1U]; - uint64_t a2 = f[2U]; - uint64_t a3 = f[3U]; - uint64_t r0 = FStar_UInt64_eq_mask(a0, (uint64_t)0U); - uint64_t r1 = FStar_UInt64_eq_mask(a1, (uint64_t)0U); - uint64_t r2 = FStar_UInt64_eq_mask(a2, (uint64_t)0U); - uint64_t r3 = FStar_UInt64_eq_mask(a3, (uint64_t)0U); - uint64_t r01 = r0 & r1; - uint64_t r23 = r2 & r3; - return r01 & r23; -} - -static uint64_t compare_felem(uint64_t *a, uint64_t *b) -{ - uint64_t a_0 = a[0U]; - uint64_t a_1 = a[1U]; - uint64_t a_2 = a[2U]; - uint64_t a_3 = a[3U]; - uint64_t b_0 = b[0U]; - uint64_t b_1 = b[1U]; - uint64_t b_2 = b[2U]; - uint64_t b_3 = b[3U]; - uint64_t r_0 = FStar_UInt64_eq_mask(a_0, b_0); - uint64_t r_1 = FStar_UInt64_eq_mask(a_1, b_1); - uint64_t r_2 = FStar_UInt64_eq_mask(a_2, b_2); - uint64_t r_3 = FStar_UInt64_eq_mask(a_3, b_3); - uint64_t r01 = r_0 & r_1; - uint64_t r23 = r_2 & r_3; - return r01 & r23; -} - -static void copy_conditional(uint64_t *out, uint64_t *x, uint64_t mask) -{ - uint64_t out_0 = out[0U]; - uint64_t out_1 = out[1U]; - uint64_t out_2 = out[2U]; - uint64_t out_3 = out[3U]; - uint64_t x_0 = x[0U]; - uint64_t x_1 = x[1U]; - uint64_t x_2 = x[2U]; - uint64_t x_3 = x[3U]; - uint64_t r_0 = out_0 ^ (mask & (out_0 ^ x_0)); - uint64_t r_1 = out_1 ^ (mask & (out_1 ^ x_1)); - uint64_t r_2 = out_2 ^ (mask & (out_2 ^ x_2)); - uint64_t r_3 = out_3 ^ (mask & (out_3 ^ x_3)); - out[0U] = r_0; - out[1U] = r_1; - out[2U] = r_2; - out[3U] = r_3; -} - -static uint64_t add4(uint64_t *x, uint64_t *y, uint64_t *result) -{ - uint64_t *r0 = result; - uint64_t *r1 = result + (uint32_t)1U; - uint64_t *r2 = result + (uint32_t)2U; - uint64_t *r3 = result + (uint32_t)3U; - uint64_t cc0 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, x[0U], y[0U], r0); - uint64_t cc1 = Lib_IntTypes_Intrinsics_add_carry_u64(cc0, x[1U], y[1U], r1); - uint64_t cc2 = Lib_IntTypes_Intrinsics_add_carry_u64(cc1, x[2U], y[2U], r2); - uint64_t cc3 = Lib_IntTypes_Intrinsics_add_carry_u64(cc2, x[3U], y[3U], r3); - return cc3; -} - -static uint64_t add4_with_carry(uint64_t c, uint64_t *x, uint64_t *y, uint64_t *result) -{ - uint64_t *r0 = result; - uint64_t *r1 = result + (uint32_t)1U; - uint64_t *r2 = result + (uint32_t)2U; - uint64_t *r3 = result + (uint32_t)3U; - uint64_t cc = Lib_IntTypes_Intrinsics_add_carry_u64(c, x[0U], y[0U], r0); - uint64_t cc1 = Lib_IntTypes_Intrinsics_add_carry_u64(cc, x[1U], y[1U], r1); - uint64_t cc2 = Lib_IntTypes_Intrinsics_add_carry_u64(cc1, x[2U], y[2U], r2); - uint64_t cc3 = Lib_IntTypes_Intrinsics_add_carry_u64(cc2, x[3U], y[3U], r3); - return cc3; -} - -static uint64_t add8(uint64_t *x, uint64_t *y, uint64_t *result) -{ - uint64_t *a0 = x; - uint64_t *a1 = x + (uint32_t)4U; - uint64_t *b0 = y; - uint64_t *b1 = y + (uint32_t)4U; - uint64_t *c0 = result; - uint64_t *c1 = result + (uint32_t)4U; - uint64_t carry0 = add4(a0, b0, c0); - uint64_t carry1 = add4_with_carry(carry0, a1, b1, c1); - return carry1; -} - -static uint64_t -add4_variables( - uint64_t *x, - uint64_t cin, - uint64_t y0, - uint64_t y1, - uint64_t y2, - uint64_t y3, - uint64_t *result -) -{ - uint64_t *r0 = result; - uint64_t *r1 = result + (uint32_t)1U; - uint64_t *r2 = result + (uint32_t)2U; - uint64_t *r3 = result + (uint32_t)3U; - uint64_t cc = Lib_IntTypes_Intrinsics_add_carry_u64(cin, x[0U], y0, r0); - uint64_t cc1 = Lib_IntTypes_Intrinsics_add_carry_u64(cc, x[1U], 
y1, r1); - uint64_t cc2 = Lib_IntTypes_Intrinsics_add_carry_u64(cc1, x[2U], y2, r2); - uint64_t cc3 = Lib_IntTypes_Intrinsics_add_carry_u64(cc2, x[3U], y3, r3); - return cc3; -} - -static uint64_t sub4_il(uint64_t *x, const uint64_t *y, uint64_t *result) -{ - uint64_t *r0 = result; - uint64_t *r1 = result + (uint32_t)1U; - uint64_t *r2 = result + (uint32_t)2U; - uint64_t *r3 = result + (uint32_t)3U; - uint64_t cc = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, x[0U], y[0U], r0); - uint64_t cc1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(cc, x[1U], y[1U], r1); - uint64_t cc2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(cc1, x[2U], y[2U], r2); - uint64_t cc3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(cc2, x[3U], y[3U], r3); - return cc3; -} - -static uint64_t sub4(uint64_t *x, uint64_t *y, uint64_t *result) -{ - uint64_t *r0 = result; - uint64_t *r1 = result + (uint32_t)1U; - uint64_t *r2 = result + (uint32_t)2U; - uint64_t *r3 = result + (uint32_t)3U; - uint64_t cc = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, x[0U], y[0U], r0); - uint64_t cc1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(cc, x[1U], y[1U], r1); - uint64_t cc2 = Lib_IntTypes_Intrinsics_sub_borrow_u64(cc1, x[2U], y[2U], r2); - uint64_t cc3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(cc2, x[3U], y[3U], r3); - return cc3; -} - -static void mul64(uint64_t x, uint64_t y, uint64_t *result, uint64_t *temp) -{ - FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(x, y); - uint64_t l0 = FStar_UInt128_uint128_to_uint64(res); - uint64_t h0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U)); - result[0U] = l0; - temp[0U] = h0; -} - -static void sq(uint64_t *f, uint64_t *out) -{ - uint64_t c0; - memset(out, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *ab = f; - uint64_t a_j = f[i0]; - uint64_t *res_j = out + i0; - uint64_t c = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++) - { - uint64_t a_i = ab[(uint32_t)4U * i]; - uint64_t *res_i0 = res_j + (uint32_t)4U * i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0); - { - uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1); - { - uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2); - { - uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i); - } - } - } - } - } - { - uint32_t i; - for (i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++) - { - uint64_t a_i = ab[i]; - uint64_t *res_i = res_j + i; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i); - } - } - uint64_t r = c; - out[i0 + i0] = r;); - c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, out, out, out); - { - uint64_t tmp[8U] = { 0U }; - uint64_t c1; - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(f[i], f[i]); - uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U)); - uint64_t lo = FStar_UInt128_uint128_to_uint64(res); - tmp[(uint32_t)2U * i] = lo; - tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;); - c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, out, tmp, out); - } -} - -static void 
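mul64 above packages a full 64x64 -> 128-bit multiply: the low word lands in *result and the high word in *temp, with FStar_UInt128_uint128 abstracting over whether a native 128-bit type exists. A standalone sketch of the same contract, assuming a compiler that provides unsigned __int128 (the KaRaMeL runtime also carries a two-limb software fallback for compilers without it):

#include <stdint.h>

/* *result = low 64 bits of x * y, *temp = high 64 bits (mul64's contract). */
static void mul64_sketch(uint64_t x, uint64_t y, uint64_t *result, uint64_t *temp)
{
  unsigned __int128 res = (unsigned __int128)x * y;
  *result = (uint64_t)res;
  *temp = (uint64_t)(res >> 64U);
}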
cmovznz4(uint64_t cin, uint64_t *x, uint64_t *y, uint64_t *r) -{ - uint64_t mask = ~FStar_UInt64_eq_mask(cin, (uint64_t)0U); - uint64_t r0 = (y[0U] & mask) | (x[0U] & ~mask); - uint64_t r1 = (y[1U] & mask) | (x[1U] & ~mask); - uint64_t r2 = (y[2U] & mask) | (x[2U] & ~mask); - uint64_t r3 = (y[3U] & mask) | (x[3U] & ~mask); - r[0U] = r0; - r[1U] = r1; - r[2U] = r2; - r[3U] = r3; -} - -static void shift_256_impl(uint64_t *i, uint64_t *o) -{ - o[0U] = (uint64_t)0U; - o[1U] = (uint64_t)0U; - o[2U] = (uint64_t)0U; - o[3U] = (uint64_t)0U; - o[4U] = i[0U]; - o[5U] = i[1U]; - o[6U] = i[2U]; - o[7U] = i[3U]; -} - -static void shift8(uint64_t *t, uint64_t *out) -{ - uint64_t t1 = t[1U]; - uint64_t t2 = t[2U]; - uint64_t t3 = t[3U]; - uint64_t t4 = t[4U]; - uint64_t t5 = t[5U]; - uint64_t t6 = t[6U]; - uint64_t t7 = t[7U]; - out[0U] = t1; - out[1U] = t2; - out[2U] = t3; - out[3U] = t4; - out[4U] = t5; - out[5U] = t6; - out[6U] = t7; - out[7U] = (uint64_t)0U; -} - -static void uploadZeroImpl(uint64_t *f) -{ - f[0U] = (uint64_t)0U; - f[1U] = (uint64_t)0U; - f[2U] = (uint64_t)0U; - f[3U] = (uint64_t)0U; -} - -static void uploadOneImpl(uint64_t *f) -{ - f[0U] = (uint64_t)1U; - f[1U] = (uint64_t)0U; - f[2U] = (uint64_t)0U; - f[3U] = (uint64_t)0U; -} - -void Hacl_Impl_P256_LowLevel_toUint8(uint64_t *i, uint8_t *o) -{ - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)4U; i0++) - { - store64_be(o + i0 * (uint32_t)8U, i[i0]); - } -} - -void Hacl_Impl_P256_LowLevel_changeEndian(uint64_t *i) -{ - uint64_t zero = i[0U]; - uint64_t one = i[1U]; - uint64_t two = i[2U]; - uint64_t three = i[3U]; - i[0U] = three; - i[1U] = two; - i[2U] = one; - i[3U] = zero; -} - -void Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(uint8_t *i, uint64_t *o) -{ - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t *os = o; - uint8_t *bj = i + i0 * (uint32_t)8U; - uint64_t u = load64_be(bj); - uint64_t r = u; - uint64_t x = r; - os[i0] = x;); - Hacl_Impl_P256_LowLevel_changeEndian(o); -} - -static const -uint64_t -prime256_buffer[4U] = - { - (uint64_t)0xffffffffffffffffU, - (uint64_t)0xffffffffU, - (uint64_t)0U, - (uint64_t)0xffffffff00000001U - }; - -static void reduction_prime_2prime_impl(uint64_t *x, uint64_t *result) -{ - uint64_t tempBuffer[4U] = { 0U }; - uint64_t c = sub4_il(x, prime256_buffer, tempBuffer); - cmovznz4(c, tempBuffer, x, result); -} - -static void p256_add(uint64_t *arg1, uint64_t *arg2, uint64_t *out) -{ - uint64_t t = add4(arg1, arg2, out); - uint64_t tempBuffer[4U] = { 0U }; - uint64_t tempBufferForSubborrow = (uint64_t)0U; - uint64_t c = sub4_il(out, prime256_buffer, tempBuffer); - uint64_t - carry = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t, (uint64_t)0U, &tempBufferForSubborrow); - cmovznz4(carry, tempBuffer, out, out); -} - -static void p256_double(uint64_t *arg1, uint64_t *out) -{ - uint64_t t = add4(arg1, arg1, out); - uint64_t tempBuffer[4U] = { 0U }; - uint64_t tempBufferForSubborrow = (uint64_t)0U; - uint64_t c = sub4_il(out, prime256_buffer, tempBuffer); - uint64_t - carry = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t, (uint64_t)0U, &tempBufferForSubborrow); - cmovznz4(carry, tempBuffer, out, out); -} - -static void p256_sub(uint64_t *arg1, uint64_t *arg2, uint64_t *out) -{ - uint64_t t = sub4(arg1, arg2, out); - uint64_t t0 = (uint64_t)0U - t; - uint64_t t1 = ((uint64_t)0U - t) >> (uint32_t)32U; - uint64_t t2 = (uint64_t)0U; - uint64_t t3 = t - (t << (uint32_t)32U); - uint64_t c = add4_variables(out, (uint64_t)0U, t0, t1, t2, t3, out); -} - -static void 
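copy_conditional (earlier) and cmovznz4 (above) are the constant-time selection primitives this file leans on, and the same eq_mask trick drives isZero_uint64_CT and compare_felem: FStar_UInt64_eq_mask(a, b) evaluates to all-ones exactly when a == b without branching, so cmovznz4's mask is all-ones when cin != 0 and (y & mask) | (x & ~mask) picks y limb by limb with no secret-dependent branch. A one-limb sketch of the idiom (the helper names are mine):

#include <stdint.h>

/* All-ones when a == b, zero otherwise, with no data-dependent branch. */
static uint64_t eq_mask_sketch(uint64_t a, uint64_t b)
{
  uint64_t x = a ^ b;                    /* zero iff a == b */
  uint64_t top = (x | (~x + 1U)) >> 63U; /* 1 iff x != 0 */
  return top - 1U;                       /* 0 - 1 wraps to all-ones */
}

/* One limb of cmovznz4: y when cond != 0, x otherwise. */
static uint64_t cmovznz_sketch(uint64_t cond, uint64_t x, uint64_t y)
{
  uint64_t mask = ~eq_mask_sketch(cond, 0U);
  return (y & mask) | (x & ~mask);
}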
montgomery_multiplication_buffer_by_one(uint64_t *a, uint64_t *result) -{ - uint64_t t[8U] = { 0U }; - uint64_t *t_low = t; - uint64_t round2[8U] = { 0U }; - uint64_t round4[8U] = { 0U }; - memcpy(t_low, a, (uint32_t)4U * sizeof (uint64_t)); - { - uint64_t tempRound[8U] = { 0U }; - uint64_t t20[8U] = { 0U }; - uint64_t t30[8U] = { 0U }; - uint64_t t10 = t[0U]; - uint64_t *result040 = t20; - uint64_t temp1 = (uint64_t)0U; - uint64_t f10 = prime256_buffer[1U]; - uint64_t f20 = prime256_buffer[2U]; - uint64_t f30 = prime256_buffer[3U]; - uint64_t *o00 = result040; - uint64_t *o10 = result040 + (uint32_t)1U; - uint64_t *o20 = result040 + (uint32_t)2U; - uint64_t *o30 = result040 + (uint32_t)3U; - uint64_t f010 = prime256_buffer[0U]; - uint64_t h0; - uint64_t l0; - uint64_t c10; - uint64_t h1; - uint64_t l1; - uint64_t c20; - uint64_t h2; - uint64_t l2; - uint64_t c30; - uint64_t temp00; - uint64_t c0; - uint64_t uu____0; - mul64(f010, t10, o00, &temp1); - h0 = temp1; - mul64(f10, t10, o10, &temp1); - l0 = o10[0U]; - c10 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l0, h0, o10); - h1 = temp1; - mul64(f20, t10, o20, &temp1); - l1 = o20[0U]; - c20 = Lib_IntTypes_Intrinsics_add_carry_u64(c10, l1, h1, o20); - h2 = temp1; - mul64(f30, t10, o30, &temp1); - l2 = o30[0U]; - c30 = Lib_IntTypes_Intrinsics_add_carry_u64(c20, l2, h2, o30); - temp00 = temp1; - c0 = c30 + temp00; - t20[4U] = c0; - uu____0 = add8(t, t20, t30); - shift8(t30, tempRound); - { - uint64_t t21[8U] = { 0U }; - uint64_t t31[8U] = { 0U }; - uint64_t t11 = tempRound[0U]; - uint64_t *result041 = t21; - uint64_t temp2 = (uint64_t)0U; - uint64_t f11 = prime256_buffer[1U]; - uint64_t f21 = prime256_buffer[2U]; - uint64_t f31 = prime256_buffer[3U]; - uint64_t *o01 = result041; - uint64_t *o11 = result041 + (uint32_t)1U; - uint64_t *o21 = result041 + (uint32_t)2U; - uint64_t *o31 = result041 + (uint32_t)3U; - uint64_t f011 = prime256_buffer[0U]; - uint64_t h3; - uint64_t l3; - uint64_t c11; - uint64_t h4; - uint64_t l4; - uint64_t c21; - uint64_t h5; - uint64_t l5; - uint64_t c31; - uint64_t temp01; - uint64_t c4; - uint64_t uu____1; - mul64(f011, t11, o01, &temp2); - h3 = temp2; - mul64(f11, t11, o11, &temp2); - l3 = o11[0U]; - c11 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l3, h3, o11); - h4 = temp2; - mul64(f21, t11, o21, &temp2); - l4 = o21[0U]; - c21 = Lib_IntTypes_Intrinsics_add_carry_u64(c11, l4, h4, o21); - h5 = temp2; - mul64(f31, t11, o31, &temp2); - l5 = o31[0U]; - c31 = Lib_IntTypes_Intrinsics_add_carry_u64(c21, l5, h5, o31); - temp01 = temp2; - c4 = c31 + temp01; - t21[4U] = c4; - uu____1 = add8(tempRound, t21, t31); - shift8(t31, round2); - { - uint64_t tempRound0[8U] = { 0U }; - uint64_t t2[8U] = { 0U }; - uint64_t t32[8U] = { 0U }; - uint64_t t12 = round2[0U]; - uint64_t *result042 = t2; - uint64_t temp3 = (uint64_t)0U; - uint64_t f12 = prime256_buffer[1U]; - uint64_t f22 = prime256_buffer[2U]; - uint64_t f32 = prime256_buffer[3U]; - uint64_t *o02 = result042; - uint64_t *o12 = result042 + (uint32_t)1U; - uint64_t *o22 = result042 + (uint32_t)2U; - uint64_t *o32 = result042 + (uint32_t)3U; - uint64_t f012 = prime256_buffer[0U]; - uint64_t h6; - uint64_t l6; - uint64_t c12; - uint64_t h7; - uint64_t l7; - uint64_t c22; - uint64_t h8; - uint64_t l8; - uint64_t c32; - uint64_t temp02; - uint64_t c5; - uint64_t uu____2; - mul64(f012, t12, o02, &temp3); - h6 = temp3; - mul64(f12, t12, o12, &temp3); - l6 = o12[0U]; - c12 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l6, h6, o12); - h7 = temp3; - 
mul64(f22, t12, o22, &temp3); - l7 = o22[0U]; - c22 = Lib_IntTypes_Intrinsics_add_carry_u64(c12, l7, h7, o22); - h8 = temp3; - mul64(f32, t12, o32, &temp3); - l8 = o32[0U]; - c32 = Lib_IntTypes_Intrinsics_add_carry_u64(c22, l8, h8, o32); - temp02 = temp3; - c5 = c32 + temp02; - t2[4U] = c5; - uu____2 = add8(round2, t2, t32); - shift8(t32, tempRound0); - { - uint64_t t22[8U] = { 0U }; - uint64_t t3[8U] = { 0U }; - uint64_t t1 = tempRound0[0U]; - uint64_t *result04 = t22; - uint64_t temp = (uint64_t)0U; - uint64_t f1 = prime256_buffer[1U]; - uint64_t f2 = prime256_buffer[2U]; - uint64_t f3 = prime256_buffer[3U]; - uint64_t *o0 = result04; - uint64_t *o1 = result04 + (uint32_t)1U; - uint64_t *o2 = result04 + (uint32_t)2U; - uint64_t *o3 = result04 + (uint32_t)3U; - uint64_t f01 = prime256_buffer[0U]; - uint64_t h9; - uint64_t l9; - uint64_t c1; - uint64_t h10; - uint64_t l10; - uint64_t c2; - uint64_t h; - uint64_t l; - uint64_t c3; - uint64_t temp0; - uint64_t c6; - uint64_t uu____3; - mul64(f01, t1, o0, &temp); - h9 = temp; - mul64(f1, t1, o1, &temp); - l9 = o1[0U]; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l9, h9, o1); - h10 = temp; - mul64(f2, t1, o2, &temp); - l10 = o2[0U]; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, l10, h10, o2); - h = temp; - mul64(f3, t1, o3, &temp); - l = o3[0U]; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, l, h, o3); - temp0 = temp; - c6 = c3 + temp0; - t22[4U] = c6; - uu____3 = add8(tempRound0, t22, t3); - shift8(t3, round4); - { - uint64_t tempBuffer[4U] = { 0U }; - uint64_t tempBufferForSubborrow = (uint64_t)0U; - uint64_t cin = round4[4U]; - uint64_t *x_ = round4; - uint64_t c = sub4_il(x_, prime256_buffer, tempBuffer); - uint64_t - carry = - Lib_IntTypes_Intrinsics_sub_borrow_u64(c, - cin, - (uint64_t)0U, - &tempBufferForSubborrow); - cmovznz4(carry, tempBuffer, x_, result); - } - } - } - } - } -} - -static void montgomery_multiplication_buffer(uint64_t *a, uint64_t *b, uint64_t *result) -{ - uint64_t t[8U] = { 0U }; - uint64_t round2[8U] = { 0U }; - uint64_t round4[8U] = { 0U }; - memset(t, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t bj = b[i0]; - uint64_t *res_j = t + i0; - uint64_t c = (uint64_t)0U; - { - uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); - { - uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); - { - uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); - { - uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); - } - } - } - } - uint64_t r = c; - t[(uint32_t)4U + i0] = r;); - { - uint64_t tempRound[8U] = { 0U }; - uint64_t t20[8U] = { 0U }; - uint64_t t30[8U] = { 0U }; - uint64_t t10 = t[0U]; - uint64_t *result040 = t20; - uint64_t temp1 = (uint64_t)0U; - uint64_t f10 = prime256_buffer[1U]; - uint64_t f20 = prime256_buffer[2U]; - uint64_t f30 = prime256_buffer[3U]; - uint64_t *o00 = result040; - uint64_t *o10 = result040 + (uint32_t)1U; - uint64_t *o20 = result040 + (uint32_t)2U; - uint64_t *o30 = 
result040 + (uint32_t)3U; - uint64_t f010 = prime256_buffer[0U]; - uint64_t h0; - uint64_t l0; - uint64_t c10; - uint64_t h1; - uint64_t l1; - uint64_t c20; - uint64_t h2; - uint64_t l2; - uint64_t c30; - uint64_t temp00; - uint64_t c0; - uint64_t uu____0; - mul64(f010, t10, o00, &temp1); - h0 = temp1; - mul64(f10, t10, o10, &temp1); - l0 = o10[0U]; - c10 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l0, h0, o10); - h1 = temp1; - mul64(f20, t10, o20, &temp1); - l1 = o20[0U]; - c20 = Lib_IntTypes_Intrinsics_add_carry_u64(c10, l1, h1, o20); - h2 = temp1; - mul64(f30, t10, o30, &temp1); - l2 = o30[0U]; - c30 = Lib_IntTypes_Intrinsics_add_carry_u64(c20, l2, h2, o30); - temp00 = temp1; - c0 = c30 + temp00; - t20[4U] = c0; - uu____0 = add8(t, t20, t30); - shift8(t30, tempRound); - { - uint64_t t21[8U] = { 0U }; - uint64_t t31[8U] = { 0U }; - uint64_t t11 = tempRound[0U]; - uint64_t *result041 = t21; - uint64_t temp2 = (uint64_t)0U; - uint64_t f11 = prime256_buffer[1U]; - uint64_t f21 = prime256_buffer[2U]; - uint64_t f31 = prime256_buffer[3U]; - uint64_t *o01 = result041; - uint64_t *o11 = result041 + (uint32_t)1U; - uint64_t *o21 = result041 + (uint32_t)2U; - uint64_t *o31 = result041 + (uint32_t)3U; - uint64_t f011 = prime256_buffer[0U]; - uint64_t h3; - uint64_t l3; - uint64_t c11; - uint64_t h4; - uint64_t l4; - uint64_t c21; - uint64_t h5; - uint64_t l5; - uint64_t c31; - uint64_t temp01; - uint64_t c4; - uint64_t uu____1; - mul64(f011, t11, o01, &temp2); - h3 = temp2; - mul64(f11, t11, o11, &temp2); - l3 = o11[0U]; - c11 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l3, h3, o11); - h4 = temp2; - mul64(f21, t11, o21, &temp2); - l4 = o21[0U]; - c21 = Lib_IntTypes_Intrinsics_add_carry_u64(c11, l4, h4, o21); - h5 = temp2; - mul64(f31, t11, o31, &temp2); - l5 = o31[0U]; - c31 = Lib_IntTypes_Intrinsics_add_carry_u64(c21, l5, h5, o31); - temp01 = temp2; - c4 = c31 + temp01; - t21[4U] = c4; - uu____1 = add8(tempRound, t21, t31); - shift8(t31, round2); - { - uint64_t tempRound0[8U] = { 0U }; - uint64_t t2[8U] = { 0U }; - uint64_t t32[8U] = { 0U }; - uint64_t t12 = round2[0U]; - uint64_t *result042 = t2; - uint64_t temp3 = (uint64_t)0U; - uint64_t f12 = prime256_buffer[1U]; - uint64_t f22 = prime256_buffer[2U]; - uint64_t f32 = prime256_buffer[3U]; - uint64_t *o02 = result042; - uint64_t *o12 = result042 + (uint32_t)1U; - uint64_t *o22 = result042 + (uint32_t)2U; - uint64_t *o32 = result042 + (uint32_t)3U; - uint64_t f012 = prime256_buffer[0U]; - uint64_t h6; - uint64_t l6; - uint64_t c12; - uint64_t h7; - uint64_t l7; - uint64_t c22; - uint64_t h8; - uint64_t l8; - uint64_t c32; - uint64_t temp02; - uint64_t c5; - uint64_t uu____2; - mul64(f012, t12, o02, &temp3); - h6 = temp3; - mul64(f12, t12, o12, &temp3); - l6 = o12[0U]; - c12 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l6, h6, o12); - h7 = temp3; - mul64(f22, t12, o22, &temp3); - l7 = o22[0U]; - c22 = Lib_IntTypes_Intrinsics_add_carry_u64(c12, l7, h7, o22); - h8 = temp3; - mul64(f32, t12, o32, &temp3); - l8 = o32[0U]; - c32 = Lib_IntTypes_Intrinsics_add_carry_u64(c22, l8, h8, o32); - temp02 = temp3; - c5 = c32 + temp02; - t2[4U] = c5; - uu____2 = add8(round2, t2, t32); - shift8(t32, tempRound0); - { - uint64_t t22[8U] = { 0U }; - uint64_t t3[8U] = { 0U }; - uint64_t t1 = tempRound0[0U]; - uint64_t *result04 = t22; - uint64_t temp = (uint64_t)0U; - uint64_t f1 = prime256_buffer[1U]; - uint64_t f2 = prime256_buffer[2U]; - uint64_t f3 = prime256_buffer[3U]; - uint64_t *o0 = result04; - uint64_t *o1 = result04 + 
(uint32_t)1U; - uint64_t *o2 = result04 + (uint32_t)2U; - uint64_t *o3 = result04 + (uint32_t)3U; - uint64_t f01 = prime256_buffer[0U]; - uint64_t h9; - uint64_t l9; - uint64_t c1; - uint64_t h10; - uint64_t l10; - uint64_t c2; - uint64_t h; - uint64_t l; - uint64_t c3; - uint64_t temp0; - uint64_t c6; - uint64_t uu____3; - mul64(f01, t1, o0, &temp); - h9 = temp; - mul64(f1, t1, o1, &temp); - l9 = o1[0U]; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l9, h9, o1); - h10 = temp; - mul64(f2, t1, o2, &temp); - l10 = o2[0U]; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, l10, h10, o2); - h = temp; - mul64(f3, t1, o3, &temp); - l = o3[0U]; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, l, h, o3); - temp0 = temp; - c6 = c3 + temp0; - t22[4U] = c6; - uu____3 = add8(tempRound0, t22, t3); - shift8(t3, round4); - { - uint64_t tempBuffer[4U] = { 0U }; - uint64_t tempBufferForSubborrow = (uint64_t)0U; - uint64_t cin = round4[4U]; - uint64_t *x_ = round4; - uint64_t c = sub4_il(x_, prime256_buffer, tempBuffer); - uint64_t - carry = - Lib_IntTypes_Intrinsics_sub_borrow_u64(c, - cin, - (uint64_t)0U, - &tempBufferForSubborrow); - cmovznz4(carry, tempBuffer, x_, result); - } - } - } - } - } -} - -static void montgomery_square_buffer(uint64_t *a, uint64_t *result) -{ - uint64_t t[8U] = { 0U }; - uint64_t round2[8U] = { 0U }; - uint64_t round4[8U] = { 0U }; - sq(a, t); - { - uint64_t tempRound[8U] = { 0U }; - uint64_t t20[8U] = { 0U }; - uint64_t t30[8U] = { 0U }; - uint64_t t10 = t[0U]; - uint64_t *result040 = t20; - uint64_t temp1 = (uint64_t)0U; - uint64_t f10 = prime256_buffer[1U]; - uint64_t f20 = prime256_buffer[2U]; - uint64_t f30 = prime256_buffer[3U]; - uint64_t *o00 = result040; - uint64_t *o10 = result040 + (uint32_t)1U; - uint64_t *o20 = result040 + (uint32_t)2U; - uint64_t *o30 = result040 + (uint32_t)3U; - uint64_t f010 = prime256_buffer[0U]; - uint64_t h0; - uint64_t l0; - uint64_t c10; - uint64_t h1; - uint64_t l1; - uint64_t c20; - uint64_t h2; - uint64_t l2; - uint64_t c30; - uint64_t temp00; - uint64_t c0; - uint64_t uu____0; - mul64(f010, t10, o00, &temp1); - h0 = temp1; - mul64(f10, t10, o10, &temp1); - l0 = o10[0U]; - c10 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l0, h0, o10); - h1 = temp1; - mul64(f20, t10, o20, &temp1); - l1 = o20[0U]; - c20 = Lib_IntTypes_Intrinsics_add_carry_u64(c10, l1, h1, o20); - h2 = temp1; - mul64(f30, t10, o30, &temp1); - l2 = o30[0U]; - c30 = Lib_IntTypes_Intrinsics_add_carry_u64(c20, l2, h2, o30); - temp00 = temp1; - c0 = c30 + temp00; - t20[4U] = c0; - uu____0 = add8(t, t20, t30); - shift8(t30, tempRound); - { - uint64_t t21[8U] = { 0U }; - uint64_t t31[8U] = { 0U }; - uint64_t t11 = tempRound[0U]; - uint64_t *result041 = t21; - uint64_t temp2 = (uint64_t)0U; - uint64_t f11 = prime256_buffer[1U]; - uint64_t f21 = prime256_buffer[2U]; - uint64_t f31 = prime256_buffer[3U]; - uint64_t *o01 = result041; - uint64_t *o11 = result041 + (uint32_t)1U; - uint64_t *o21 = result041 + (uint32_t)2U; - uint64_t *o31 = result041 + (uint32_t)3U; - uint64_t f011 = prime256_buffer[0U]; - uint64_t h3; - uint64_t l3; - uint64_t c11; - uint64_t h4; - uint64_t l4; - uint64_t c21; - uint64_t h5; - uint64_t l5; - uint64_t c31; - uint64_t temp01; - uint64_t c4; - uint64_t uu____1; - mul64(f011, t11, o01, &temp2); - h3 = temp2; - mul64(f11, t11, o11, &temp2); - l3 = o11[0U]; - c11 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l3, h3, o11); - h4 = temp2; - mul64(f21, t11, o21, &temp2); - l4 = o21[0U]; - c21 = 
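Each unrolled block in montgomery_multiplication_buffer_by_one and montgomery_multiplication_buffer is one word-level Montgomery reduction round. Because prime256_buffer[0U] is 0xffffffffffffffff, p mod 2^64 is 2^64 - 1, so the usual Montgomery constant -p^-1 mod 2^64 equals 1 and the round multiplier is simply the accumulator's low limb t[0]: each round computes t <- (t + t[0] * p) / 2^64, and the low limb vanishes exactly because 1 + p is divisible by 2^64. Four rounds yield a * b * 2^-256 mod p up to one conditional subtraction, which is the closing sub4_il/cmovznz4 step. A compact sketch of one round over the 8-limb accumulator, assuming unsigned __int128:

#include <stdint.h>

/* One Montgomery round for the P-256 field prime (round multiplier t[0]):
   t <- (t + t[0] * p) / 2^64 over an 8-limb accumulator. */
static void mont_round_sketch(uint64_t t[8U], const uint64_t p[4U])
{
  unsigned __int128 acc = 0U;
  uint64_t m = t[0U];
  uint32_t i;
  for (i = (uint32_t)0U; i < (uint32_t)4U; i++)
  {
    acc += (unsigned __int128)m * p[i];
    acc += t[i];
    t[i] = (uint64_t)acc; /* t[0] is zeroed by the choice of multiplier */
    acc >>= 64U;
  }
  for (i = (uint32_t)4U; i < (uint32_t)8U; i++)
  {
    acc += t[i]; /* propagate the carry through the upper limbs */
    t[i] = (uint64_t)acc;
    acc >>= 64U;
  }
  for (i = (uint32_t)0U; i < (uint32_t)7U; i++)
  {
    t[i] = t[i + 1U]; /* divide by 2^64: drop the zero low limb */
  }
  t[7U] = (uint64_t)acc;
}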
Lib_IntTypes_Intrinsics_add_carry_u64(c11, l4, h4, o21); - h5 = temp2; - mul64(f31, t11, o31, &temp2); - l5 = o31[0U]; - c31 = Lib_IntTypes_Intrinsics_add_carry_u64(c21, l5, h5, o31); - temp01 = temp2; - c4 = c31 + temp01; - t21[4U] = c4; - uu____1 = add8(tempRound, t21, t31); - shift8(t31, round2); - { - uint64_t tempRound0[8U] = { 0U }; - uint64_t t2[8U] = { 0U }; - uint64_t t32[8U] = { 0U }; - uint64_t t12 = round2[0U]; - uint64_t *result042 = t2; - uint64_t temp3 = (uint64_t)0U; - uint64_t f12 = prime256_buffer[1U]; - uint64_t f22 = prime256_buffer[2U]; - uint64_t f32 = prime256_buffer[3U]; - uint64_t *o02 = result042; - uint64_t *o12 = result042 + (uint32_t)1U; - uint64_t *o22 = result042 + (uint32_t)2U; - uint64_t *o32 = result042 + (uint32_t)3U; - uint64_t f012 = prime256_buffer[0U]; - uint64_t h6; - uint64_t l6; - uint64_t c12; - uint64_t h7; - uint64_t l7; - uint64_t c22; - uint64_t h8; - uint64_t l8; - uint64_t c32; - uint64_t temp02; - uint64_t c5; - uint64_t uu____2; - mul64(f012, t12, o02, &temp3); - h6 = temp3; - mul64(f12, t12, o12, &temp3); - l6 = o12[0U]; - c12 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l6, h6, o12); - h7 = temp3; - mul64(f22, t12, o22, &temp3); - l7 = o22[0U]; - c22 = Lib_IntTypes_Intrinsics_add_carry_u64(c12, l7, h7, o22); - h8 = temp3; - mul64(f32, t12, o32, &temp3); - l8 = o32[0U]; - c32 = Lib_IntTypes_Intrinsics_add_carry_u64(c22, l8, h8, o32); - temp02 = temp3; - c5 = c32 + temp02; - t2[4U] = c5; - uu____2 = add8(round2, t2, t32); - shift8(t32, tempRound0); - { - uint64_t t22[8U] = { 0U }; - uint64_t t3[8U] = { 0U }; - uint64_t t1 = tempRound0[0U]; - uint64_t *result04 = t22; - uint64_t temp = (uint64_t)0U; - uint64_t f1 = prime256_buffer[1U]; - uint64_t f2 = prime256_buffer[2U]; - uint64_t f3 = prime256_buffer[3U]; - uint64_t *o0 = result04; - uint64_t *o1 = result04 + (uint32_t)1U; - uint64_t *o2 = result04 + (uint32_t)2U; - uint64_t *o3 = result04 + (uint32_t)3U; - uint64_t f01 = prime256_buffer[0U]; - uint64_t h9; - uint64_t l9; - uint64_t c1; - uint64_t h10; - uint64_t l10; - uint64_t c2; - uint64_t h; - uint64_t l; - uint64_t c3; - uint64_t temp0; - uint64_t c6; - uint64_t uu____3; - mul64(f01, t1, o0, &temp); - h9 = temp; - mul64(f1, t1, o1, &temp); - l9 = o1[0U]; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l9, h9, o1); - h10 = temp; - mul64(f2, t1, o2, &temp); - l10 = o2[0U]; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, l10, h10, o2); - h = temp; - mul64(f3, t1, o3, &temp); - l = o3[0U]; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, l, h, o3); - temp0 = temp; - c6 = c3 + temp0; - t22[4U] = c6; - uu____3 = add8(tempRound0, t22, t3); - shift8(t3, round4); - { - uint64_t tempBuffer[4U] = { 0U }; - uint64_t tempBufferForSubborrow = (uint64_t)0U; - uint64_t cin = round4[4U]; - uint64_t *x_ = round4; - uint64_t c = sub4_il(x_, prime256_buffer, tempBuffer); - uint64_t - carry = - Lib_IntTypes_Intrinsics_sub_borrow_u64(c, - cin, - (uint64_t)0U, - &tempBufferForSubborrow); - cmovznz4(carry, tempBuffer, x_, result); - } - } - } - } - } -} - -static void fsquarePowN(uint32_t n, uint64_t *a) -{ - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - montgomery_square_buffer(a, a); - } -} - -static void fsquarePowNminusOne(uint32_t n, uint64_t *a, uint64_t *b) -{ - uint32_t i; - b[0U] = (uint64_t)1U; - b[1U] = (uint64_t)18446744069414584320U; - b[2U] = (uint64_t)18446744073709551615U; - b[3U] = (uint64_t)4294967294U; - for (i = (uint32_t)0U; i < n; i++) - { - montgomery_multiplication_buffer(b, a, b); - 
montgomery_square_buffer(a, a); - } -} - -static void exponent(uint64_t *a, uint64_t *result, uint64_t *tempBuffer) -{ - uint64_t *buffer_norm_1 = tempBuffer; - uint64_t *buffer_result1 = tempBuffer + (uint32_t)4U; - uint64_t *buffer_result2 = tempBuffer + (uint32_t)8U; - uint64_t *buffer_norm_3 = tempBuffer + (uint32_t)12U; - uint64_t *buffer_result3 = tempBuffer + (uint32_t)16U; - uint64_t *buffer_a0; - uint64_t *buffer_b0; - uint64_t *buffer_a; - uint64_t *buffer_b; - memcpy(buffer_norm_1, a, (uint32_t)4U * sizeof (uint64_t)); - buffer_a0 = buffer_norm_1; - buffer_b0 = buffer_norm_1 + (uint32_t)4U; - fsquarePowNminusOne((uint32_t)32U, buffer_a0, buffer_b0); - fsquarePowN((uint32_t)224U, buffer_b0); - memcpy(buffer_result2, a, (uint32_t)4U * sizeof (uint64_t)); - fsquarePowN((uint32_t)192U, buffer_result2); - memcpy(buffer_norm_3, a, (uint32_t)4U * sizeof (uint64_t)); - buffer_a = buffer_norm_3; - buffer_b = buffer_norm_3 + (uint32_t)4U; - fsquarePowNminusOne((uint32_t)94U, buffer_a, buffer_b); - fsquarePowN((uint32_t)2U, buffer_b); - montgomery_multiplication_buffer(buffer_result1, buffer_result2, buffer_result1); - montgomery_multiplication_buffer(buffer_result1, buffer_result3, buffer_result1); - montgomery_multiplication_buffer(buffer_result1, a, buffer_result1); - memcpy(result, buffer_result1, (uint32_t)4U * sizeof (uint64_t)); -} - -static void cube(uint64_t *a, uint64_t *result) -{ - montgomery_square_buffer(a, result); - montgomery_multiplication_buffer(result, a, result); -} - -static void multByTwo(uint64_t *a, uint64_t *out) -{ - p256_add(a, a, out); -} - -static void multByThree(uint64_t *a, uint64_t *result) -{ - multByTwo(a, result); - p256_add(a, result, result); -} - -static void multByFour(uint64_t *a, uint64_t *result) -{ - multByTwo(a, result); - multByTwo(result, result); -} - -static void multByEight(uint64_t *a, uint64_t *result) -{ - multByTwo(a, result); - multByTwo(result, result); - multByTwo(result, result); -} - -static uint64_t store_high_low_u(uint32_t high, uint32_t low) -{ - uint64_t as_uint64_high = (uint64_t)high; - uint64_t as_uint64_high1 = as_uint64_high << (uint32_t)32U; - uint64_t as_uint64_low = (uint64_t)low; - return as_uint64_low ^ as_uint64_high1; -} - -static void solinas_reduction_impl(uint64_t *i, uint64_t *o) -{ - uint64_t tempBuffer[36U] = { 0U }; - uint64_t i0 = i[0U]; - uint64_t i1 = i[1U]; - uint64_t i2 = i[2U]; - uint64_t i3 = i[3U]; - uint64_t i4 = i[4U]; - uint64_t i5 = i[5U]; - uint64_t i6 = i[6U]; - uint64_t i7 = i[7U]; - uint32_t c0 = (uint32_t)i0; - uint32_t c1 = (uint32_t)(i0 >> (uint32_t)32U); - uint32_t c2 = (uint32_t)i1; - uint32_t c3 = (uint32_t)(i1 >> (uint32_t)32U); - uint32_t c4 = (uint32_t)i2; - uint32_t c5 = (uint32_t)(i2 >> (uint32_t)32U); - uint32_t c6 = (uint32_t)i3; - uint32_t c7 = (uint32_t)(i3 >> (uint32_t)32U); - uint32_t c8 = (uint32_t)i4; - uint32_t c9 = (uint32_t)(i4 >> (uint32_t)32U); - uint32_t c10 = (uint32_t)i5; - uint32_t c11 = (uint32_t)(i5 >> (uint32_t)32U); - uint32_t c12 = (uint32_t)i6; - uint32_t c13 = (uint32_t)(i6 >> (uint32_t)32U); - uint32_t c14 = (uint32_t)i7; - uint32_t c15 = (uint32_t)(i7 >> (uint32_t)32U); - uint64_t *t010 = tempBuffer; - uint64_t *t110 = tempBuffer + (uint32_t)4U; - uint64_t *t210 = tempBuffer + (uint32_t)8U; - uint64_t *t310 = tempBuffer + (uint32_t)12U; - uint64_t *t410 = tempBuffer + (uint32_t)16U; - uint64_t *t510 = tempBuffer + (uint32_t)20U; - uint64_t *t610 = tempBuffer + (uint32_t)24U; - uint64_t *t710 = tempBuffer + (uint32_t)28U; - uint64_t *t810 = tempBuffer + 
(uint32_t)32U; - uint64_t b00 = store_high_low_u(c1, c0); - uint64_t b10 = store_high_low_u(c3, c2); - uint64_t b20 = store_high_low_u(c5, c4); - uint64_t b30 = store_high_low_u(c7, c6); - uint64_t b01; - uint64_t b11; - uint64_t b21; - uint64_t b31; - uint64_t b02; - uint64_t b12; - uint64_t b22; - uint64_t b32; - uint64_t b03; - uint64_t b13; - uint64_t b23; - uint64_t b33; - uint64_t b04; - uint64_t b14; - uint64_t b24; - uint64_t b34; - uint64_t b05; - uint64_t b15; - uint64_t b25; - uint64_t b35; - uint64_t b06; - uint64_t b16; - uint64_t b26; - uint64_t b36; - uint64_t b07; - uint64_t b17; - uint64_t b27; - uint64_t b37; - uint64_t b0; - uint64_t b1; - uint64_t b2; - uint64_t b3; - uint64_t *t01; - uint64_t *t11; - uint64_t *t21; - uint64_t *t31; - uint64_t *t41; - uint64_t *t51; - uint64_t *t61; - uint64_t *t71; - uint64_t *t81; - t010[0U] = b00; - t010[1U] = b10; - t010[2U] = b20; - t010[3U] = b30; - reduction_prime_2prime_impl(t010, t010); - b01 = (uint64_t)0U; - b11 = store_high_low_u(c11, (uint32_t)0U); - b21 = store_high_low_u(c13, c12); - b31 = store_high_low_u(c15, c14); - t110[0U] = b01; - t110[1U] = b11; - t110[2U] = b21; - t110[3U] = b31; - reduction_prime_2prime_impl(t110, t110); - b02 = (uint64_t)0U; - b12 = store_high_low_u(c12, (uint32_t)0U); - b22 = store_high_low_u(c14, c13); - b32 = store_high_low_u((uint32_t)0U, c15); - t210[0U] = b02; - t210[1U] = b12; - t210[2U] = b22; - t210[3U] = b32; - b03 = store_high_low_u(c9, c8); - b13 = store_high_low_u((uint32_t)0U, c10); - b23 = (uint64_t)0U; - b33 = store_high_low_u(c15, c14); - t310[0U] = b03; - t310[1U] = b13; - t310[2U] = b23; - t310[3U] = b33; - reduction_prime_2prime_impl(t310, t310); - b04 = store_high_low_u(c10, c9); - b14 = store_high_low_u(c13, c11); - b24 = store_high_low_u(c15, c14); - b34 = store_high_low_u(c8, c13); - t410[0U] = b04; - t410[1U] = b14; - t410[2U] = b24; - t410[3U] = b34; - reduction_prime_2prime_impl(t410, t410); - b05 = store_high_low_u(c12, c11); - b15 = store_high_low_u((uint32_t)0U, c13); - b25 = (uint64_t)0U; - b35 = store_high_low_u(c10, c8); - t510[0U] = b05; - t510[1U] = b15; - t510[2U] = b25; - t510[3U] = b35; - reduction_prime_2prime_impl(t510, t510); - b06 = store_high_low_u(c13, c12); - b16 = store_high_low_u(c15, c14); - b26 = (uint64_t)0U; - b36 = store_high_low_u(c11, c9); - t610[0U] = b06; - t610[1U] = b16; - t610[2U] = b26; - t610[3U] = b36; - reduction_prime_2prime_impl(t610, t610); - b07 = store_high_low_u(c14, c13); - b17 = store_high_low_u(c8, c15); - b27 = store_high_low_u(c10, c9); - b37 = store_high_low_u(c12, (uint32_t)0U); - t710[0U] = b07; - t710[1U] = b17; - t710[2U] = b27; - t710[3U] = b37; - reduction_prime_2prime_impl(t710, t710); - b0 = store_high_low_u(c15, c14); - b1 = store_high_low_u(c9, (uint32_t)0U); - b2 = store_high_low_u(c11, c10); - b3 = store_high_low_u(c13, (uint32_t)0U); - t810[0U] = b0; - t810[1U] = b1; - t810[2U] = b2; - t810[3U] = b3; - reduction_prime_2prime_impl(t810, t810); - t01 = tempBuffer; - t11 = tempBuffer + (uint32_t)4U; - t21 = tempBuffer + (uint32_t)8U; - t31 = tempBuffer + (uint32_t)12U; - t41 = tempBuffer + (uint32_t)16U; - t51 = tempBuffer + (uint32_t)20U; - t61 = tempBuffer + (uint32_t)24U; - t71 = tempBuffer + (uint32_t)28U; - t81 = tempBuffer + (uint32_t)32U; - p256_double(t21, t21); - p256_double(t11, t11); - p256_add(t01, t11, o); - p256_add(t21, o, o); - p256_add(t31, o, o); - p256_add(t41, o, o); - p256_sub(o, t51, o); - p256_sub(o, t61, o); - p256_sub(o, t71, o); - p256_sub(o, t81, o); -} - -static void 
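solinas_reduction_impl is the standard fast reduction for the Solinas prime p = 2^256 - 2^224 + 2^192 + 2^96 - 1 (the FIPS 186-4 P-256 routine): the 512-bit input is sliced into 32-bit words c0..c15, nine 256-bit terms t0..t8 are assembled from selected words, and the doubling/addition/subtraction tail above computes the signed combination

  r = t0 + 2*t1 + 2*t2 + t3 + t4 - t5 - t6 - t7 - t8  (mod p).

Each assembled term is pre-reduced below p with reduction_prime_2prime_impl except t2, whose highest limb holds only c15, so its top 32 bits are zero and it is below p by construction.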
-point_double_a_b_g( - uint64_t *p, - uint64_t *alpha, - uint64_t *beta, - uint64_t *gamma, - uint64_t *delta, - uint64_t *tempBuffer -) -{ - uint64_t *pX = p; - uint64_t *pY = p + (uint32_t)4U; - uint64_t *pZ = p + (uint32_t)8U; - uint64_t *a0 = tempBuffer; - uint64_t *a1 = tempBuffer + (uint32_t)4U; - uint64_t *alpha0 = tempBuffer + (uint32_t)8U; - montgomery_square_buffer(pZ, delta); - montgomery_square_buffer(pY, gamma); - montgomery_multiplication_buffer(pX, gamma, beta); - p256_sub(pX, delta, a0); - p256_add(pX, delta, a1); - montgomery_multiplication_buffer(a0, a1, alpha0); - multByThree(alpha0, alpha); -} - -static void -point_double_x3( - uint64_t *x3, - uint64_t *alpha, - uint64_t *fourBeta, - uint64_t *beta, - uint64_t *eightBeta -) -{ - montgomery_square_buffer(alpha, x3); - multByFour(beta, fourBeta); - multByTwo(fourBeta, eightBeta); - p256_sub(x3, eightBeta, x3); -} - -static void -point_double_z3(uint64_t *z3, uint64_t *pY, uint64_t *pZ, uint64_t *gamma, uint64_t *delta) -{ - p256_add(pY, pZ, z3); - montgomery_square_buffer(z3, z3); - p256_sub(z3, gamma, z3); - p256_sub(z3, delta, z3); -} - -static void -point_double_y3( - uint64_t *y3, - uint64_t *x3, - uint64_t *alpha, - uint64_t *gamma, - uint64_t *eightGamma, - uint64_t *fourBeta -) -{ - p256_sub(fourBeta, x3, y3); - montgomery_multiplication_buffer(alpha, y3, y3); - montgomery_square_buffer(gamma, gamma); - multByEight(gamma, eightGamma); - p256_sub(y3, eightGamma, y3); -} - -static void point_double(uint64_t *p, uint64_t *result, uint64_t *tempBuffer) -{ - uint64_t *pY = p + (uint32_t)4U; - uint64_t *pZ = p + (uint32_t)8U; - uint64_t *x3 = result; - uint64_t *y3 = result + (uint32_t)4U; - uint64_t *z3 = result + (uint32_t)8U; - uint64_t *delta = tempBuffer; - uint64_t *gamma = tempBuffer + (uint32_t)4U; - uint64_t *beta = tempBuffer + (uint32_t)8U; - uint64_t *alpha = tempBuffer + (uint32_t)16U; - uint64_t *fourBeta = tempBuffer + (uint32_t)20U; - uint64_t *eightBeta = tempBuffer + (uint32_t)24U; - uint64_t *eightGamma = tempBuffer + (uint32_t)28U; - uint64_t *tmp = tempBuffer + (uint32_t)32U; - point_double_a_b_g(p, alpha, beta, gamma, delta, tmp); - point_double_x3(x3, alpha, fourBeta, beta, eightBeta); - point_double_z3(z3, pY, pZ, gamma, delta); - point_double_y3(y3, x3, alpha, gamma, eightGamma, fourBeta); -} - -static void -copy_point_conditional( - uint64_t *x3_out, - uint64_t *y3_out, - uint64_t *z3_out, - uint64_t *p, - uint64_t *maskPoint -) -{ - uint64_t *z = maskPoint + (uint32_t)8U; - uint64_t mask = isZero_uint64_CT(z); - uint64_t *p_x = p; - uint64_t *p_y = p + (uint32_t)4U; - uint64_t *p_z = p + (uint32_t)8U; - copy_conditional(x3_out, p_x, mask); - copy_conditional(y3_out, p_y, mask); - copy_conditional(z3_out, p_z, mask); -} - -static void point_add(uint64_t *p, uint64_t *q, uint64_t *result, uint64_t *tempBuffer) -{ - uint64_t *tempBuffer16 = tempBuffer; - uint64_t *u1 = tempBuffer + (uint32_t)16U; - uint64_t *u2 = tempBuffer + (uint32_t)20U; - uint64_t *s1 = tempBuffer + (uint32_t)24U; - uint64_t *s2 = tempBuffer + (uint32_t)28U; - uint64_t *h = tempBuffer + (uint32_t)32U; - uint64_t *r = tempBuffer + (uint32_t)36U; - uint64_t *uh = tempBuffer + (uint32_t)40U; - uint64_t *hCube = tempBuffer + (uint32_t)44U; - uint64_t *tempBuffer28 = tempBuffer + (uint32_t)60U; - uint64_t *pX = p; - uint64_t *pY = p + (uint32_t)4U; - uint64_t *pZ0 = p + (uint32_t)8U; - uint64_t *qX = q; - uint64_t *qY = q + (uint32_t)4U; - uint64_t *qZ0 = q + (uint32_t)8U; - uint64_t *z2Square = tempBuffer16; - uint64_t *z1Square = 
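point_double and its four helpers above follow the usual Jacobian doubling formulas (the dbl-2001-b recipe from the Explicit-Formulas Database), evaluated entirely in the Montgomery domain. With delta = Z1^2, gamma = Y1^2, beta = X1*gamma and alpha = 3*(X1 - delta)*(X1 + delta), the new point is

  X3 = alpha^2 - 8*beta
  Z3 = (Y1 + Z1)^2 - gamma - delta
  Y3 = alpha*(4*beta - X3) - 8*gamma^2,

where the small-constant multiples (multByThree, multByFour, multByEight) are built from field additions rather than general multiplications.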
tempBuffer16 + (uint32_t)4U; - uint64_t *z2Cube = tempBuffer16 + (uint32_t)8U; - uint64_t *z1Cube = tempBuffer16 + (uint32_t)12U; - uint64_t *temp; - uint64_t *pZ; - uint64_t *qZ; - uint64_t *tempBuffer161; - uint64_t *x3_out1; - uint64_t *y3_out1; - uint64_t *z3_out1; - uint64_t *rSquare; - uint64_t *rH; - uint64_t *twoUh; - uint64_t *s1hCube; - uint64_t *u1hx3; - uint64_t *ru1hx3; - uint64_t *z1z2; - montgomery_square_buffer(qZ0, z2Square); - montgomery_square_buffer(pZ0, z1Square); - montgomery_multiplication_buffer(z2Square, qZ0, z2Cube); - montgomery_multiplication_buffer(z1Square, pZ0, z1Cube); - montgomery_multiplication_buffer(z2Square, pX, u1); - montgomery_multiplication_buffer(z1Square, qX, u2); - montgomery_multiplication_buffer(z2Cube, pY, s1); - montgomery_multiplication_buffer(z1Cube, qY, s2); - temp = tempBuffer16; - p256_sub(u2, u1, h); - p256_sub(s2, s1, r); - montgomery_square_buffer(h, temp); - montgomery_multiplication_buffer(temp, u1, uh); - montgomery_multiplication_buffer(temp, h, hCube); - pZ = p + (uint32_t)8U; - qZ = q + (uint32_t)8U; - tempBuffer161 = tempBuffer28; - x3_out1 = tempBuffer28 + (uint32_t)16U; - y3_out1 = tempBuffer28 + (uint32_t)20U; - z3_out1 = tempBuffer28 + (uint32_t)24U; - rSquare = tempBuffer161; - rH = tempBuffer161 + (uint32_t)4U; - twoUh = tempBuffer161 + (uint32_t)8U; - montgomery_square_buffer(r, rSquare); - p256_sub(rSquare, hCube, rH); - multByTwo(uh, twoUh); - p256_sub(rH, twoUh, x3_out1); - s1hCube = tempBuffer161; - u1hx3 = tempBuffer161 + (uint32_t)4U; - ru1hx3 = tempBuffer161 + (uint32_t)8U; - montgomery_multiplication_buffer(s1, hCube, s1hCube); - p256_sub(uh, x3_out1, u1hx3); - montgomery_multiplication_buffer(u1hx3, r, ru1hx3); - p256_sub(ru1hx3, s1hCube, y3_out1); - z1z2 = tempBuffer161; - montgomery_multiplication_buffer(pZ, qZ, z1z2); - montgomery_multiplication_buffer(z1z2, h, z3_out1); - copy_point_conditional(x3_out1, y3_out1, z3_out1, q, p); - copy_point_conditional(x3_out1, y3_out1, z3_out1, p, q); - memcpy(result, x3_out1, (uint32_t)4U * sizeof (uint64_t)); - memcpy(result + (uint32_t)4U, y3_out1, (uint32_t)4U * sizeof (uint64_t)); - memcpy(result + (uint32_t)8U, z3_out1, (uint32_t)4U * sizeof (uint64_t)); -} - -static void pointToDomain(uint64_t *p, uint64_t *result) -{ - uint64_t *p_x = p; - uint64_t *p_y = p + (uint32_t)4U; - uint64_t *p_z = p + (uint32_t)8U; - uint64_t *r_x = result; - uint64_t *r_y = result + (uint32_t)4U; - uint64_t *r_z = result + (uint32_t)8U; - uint64_t multBuffer[8U] = { 0U }; - shift_256_impl(p_x, multBuffer); - solinas_reduction_impl(multBuffer, r_x); - { - uint64_t multBuffer0[8U] = { 0U }; - shift_256_impl(p_y, multBuffer0); - solinas_reduction_impl(multBuffer0, r_y); - { - uint64_t multBuffer1[8U] = { 0U }; - shift_256_impl(p_z, multBuffer1); - solinas_reduction_impl(multBuffer1, r_z); - } - } -} - -static void copy_point(uint64_t *p, uint64_t *result) -{ - memcpy(result, p, (uint32_t)12U * sizeof (uint64_t)); -} - -uint64_t Hacl_Impl_P256_Core_isPointAtInfinityPrivate(uint64_t *p) -{ - uint64_t z0 = p[8U]; - uint64_t z1 = p[9U]; - uint64_t z2 = p[10U]; - uint64_t z3 = p[11U]; - uint64_t z0_zero = FStar_UInt64_eq_mask(z0, (uint64_t)0U); - uint64_t z1_zero = FStar_UInt64_eq_mask(z1, (uint64_t)0U); - uint64_t z2_zero = FStar_UInt64_eq_mask(z2, (uint64_t)0U); - uint64_t z3_zero = FStar_UInt64_eq_mask(z3, (uint64_t)0U); - return (z0_zero & z1_zero) & (z2_zero & z3_zero); -} - -static inline void cswap(uint64_t bit, uint64_t *p1, uint64_t *p2) -{ - uint64_t mask = (uint64_t)0U - bit; - 
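Points here are Jacobian triples (X, Y, Z) of four limbs each, with affine coordinates x = X/Z^2 and y = Y/Z^3 and the point at infinity encoded as Z = 0; isPointAtInfinityPrivate tests that encoding with eq_mask rather than a comparison, so the test itself is constant time. The copy_point_conditional calls closing point_add patch the sum when either operand is the point at infinity, a case the generic addition formulas do not cover. norm, below, converts back to affine by computing Z^-2 and Z^-3 through exponent(), which by its use here is Fermat inversion a^(p-2) mod p, so no secret-dependent division is ever performed.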
uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)12U; i++) - { - uint64_t dummy = mask & (p1[i] ^ p2[i]); - p1[i] = p1[i] ^ dummy; - p2[i] = p2[i] ^ dummy; - } -} - -static void norm(uint64_t *p, uint64_t *resultPoint, uint64_t *tempBuffer) -{ - uint64_t *xf = p; - uint64_t *yf = p + (uint32_t)4U; - uint64_t *zf = p + (uint32_t)8U; - uint64_t *z2f = tempBuffer + (uint32_t)4U; - uint64_t *z3f = tempBuffer + (uint32_t)8U; - uint64_t *tempBuffer20 = tempBuffer + (uint32_t)12U; - montgomery_square_buffer(zf, z2f); - montgomery_multiplication_buffer(z2f, zf, z3f); - exponent(z2f, z2f, tempBuffer20); - exponent(z3f, z3f, tempBuffer20); - montgomery_multiplication_buffer(xf, z2f, z2f); - montgomery_multiplication_buffer(yf, z3f, z3f); - { - uint64_t zeroBuffer[4U] = { 0U }; - uint64_t *resultX = resultPoint; - uint64_t *resultY = resultPoint + (uint32_t)4U; - uint64_t *resultZ = resultPoint + (uint32_t)8U; - uint64_t bit = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(p); - montgomery_multiplication_buffer_by_one(z2f, resultX); - montgomery_multiplication_buffer_by_one(z3f, resultY); - uploadOneImpl(resultZ); - copy_conditional(resultZ, zeroBuffer, bit); - } -} - -static void normX(uint64_t *p, uint64_t *result, uint64_t *tempBuffer) -{ - uint64_t *xf = p; - uint64_t *zf = p + (uint32_t)8U; - uint64_t *z2f = tempBuffer + (uint32_t)4U; - uint64_t *tempBuffer20 = tempBuffer + (uint32_t)12U; - montgomery_square_buffer(zf, z2f); - exponent(z2f, z2f, tempBuffer20); - montgomery_multiplication_buffer(z2f, xf, z2f); - montgomery_multiplication_buffer_by_one(z2f, result); -} - -static void zero_buffer(uint64_t *p) -{ - p[0U] = (uint64_t)0U; - p[1U] = (uint64_t)0U; - p[2U] = (uint64_t)0U; - p[3U] = (uint64_t)0U; - p[4U] = (uint64_t)0U; - p[5U] = (uint64_t)0U; - p[6U] = (uint64_t)0U; - p[7U] = (uint64_t)0U; - p[8U] = (uint64_t)0U; - p[9U] = (uint64_t)0U; - p[10U] = (uint64_t)0U; - p[11U] = (uint64_t)0U; -} - -static void -scalarMultiplicationL(uint64_t *p, uint64_t *result, uint8_t *scalar, uint64_t *tempBuffer) -{ - uint64_t *q = tempBuffer; - uint64_t *buff; - zero_buffer(q); - buff = tempBuffer + (uint32_t)12U; - pointToDomain(p, result); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)256U; i++) - { - uint32_t bit0 = (uint32_t)255U - i; - uint64_t - bit = - (uint64_t)(scalar[(uint32_t)31U - bit0 / (uint32_t)8U] >> bit0 % (uint32_t)8U & (uint8_t)1U); - cswap(bit, q, result); - point_add(q, result, result, buff); - point_double(q, q, buff); - cswap(bit, q, result); - } - } - norm(q, result, buff); -} - -static void -scalarMultiplicationC( - uint64_t *p, - uint64_t *result, - const uint8_t *scalar, - uint64_t *tempBuffer -) -{ - uint64_t *q = tempBuffer; - uint64_t *buff; - zero_buffer(q); - buff = tempBuffer + (uint32_t)12U; - pointToDomain(p, result); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)256U; i++) - { - uint32_t bit0 = (uint32_t)255U - i; - uint64_t - bit = - (uint64_t)(scalar[(uint32_t)31U - bit0 / (uint32_t)8U] >> bit0 % (uint32_t)8U & (uint8_t)1U); - cswap(bit, q, result); - point_add(q, result, result, buff); - point_double(q, q, buff); - cswap(bit, q, result); - } - } - norm(q, result, buff); -} - -static void uploadBasePoint(uint64_t *p) -{ - p[0U] = (uint64_t)8784043285714375740U; - p[1U] = (uint64_t)8483257759279461889U; - p[2U] = (uint64_t)8789745728267363600U; - p[3U] = (uint64_t)1770019616739251654U; - p[4U] = (uint64_t)15992936863339206154U; - p[5U] = (uint64_t)10037038012062884956U; - p[6U] = (uint64_t)15197544864945402661U; - p[7U] = 
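scalarMultiplicationL and scalarMultiplicationC (identical up to the const qualifier on the scalar) are Montgomery ladders: q starts as the all-zero point (Z = 0, the point at infinity), result starts as P mapped into the domain, and each of the 256 iterations runs the same cswap/point_add/point_double/cswap sequence, so the operation trace is independent of the key. The scalar is read big-endian, most significant bit first; a helper making the bit indexing explicit (a sketch, the name is mine):

#include <stdint.h>

/* Bit k (k = 0 is least significant) of a 32-byte big-endian scalar,
   mirroring scalar[31U - k / 8U] >> (k % 8U) & 1U in the ladders above. */
static uint64_t scalar_bit_sketch(const uint8_t scalar[32U], uint32_t k)
{
  return (uint64_t)((scalar[(uint32_t)31U - k / (uint32_t)8U] >> (k % (uint32_t)8U)) & 1U);
}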
(uint64_t)9615747158586711429U; - p[8U] = (uint64_t)1U; - p[9U] = (uint64_t)18446744069414584320U; - p[10U] = (uint64_t)18446744073709551615U; - p[11U] = (uint64_t)4294967294U; -} - -static void -scalarMultiplicationWithoutNorm( - uint64_t *p, - uint64_t *result, - uint8_t *scalar, - uint64_t *tempBuffer -) -{ - uint64_t *q = tempBuffer; - uint64_t *buff; - zero_buffer(q); - buff = tempBuffer + (uint32_t)12U; - pointToDomain(p, result); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)256U; i++) - { - uint32_t bit0 = (uint32_t)255U - i; - uint64_t - bit = - (uint64_t)(scalar[(uint32_t)31U - bit0 / (uint32_t)8U] >> bit0 % (uint32_t)8U & (uint8_t)1U); - cswap(bit, q, result); - point_add(q, result, result, buff); - point_double(q, q, buff); - cswap(bit, q, result); - } - } - copy_point(q, result); -} - -void -Hacl_Impl_P256_Core_secretToPublic(uint64_t *result, uint8_t *scalar, uint64_t *tempBuffer) -{ - uint64_t basePoint[12U] = { 0U }; - uint64_t *q; - uint64_t *buff; - uploadBasePoint(basePoint); - q = tempBuffer; - buff = tempBuffer + (uint32_t)12U; - zero_buffer(q); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)256U; i++) - { - uint32_t bit0 = (uint32_t)255U - i; - uint64_t - bit = - (uint64_t)(scalar[(uint32_t)31U - bit0 / (uint32_t)8U] >> bit0 % (uint32_t)8U & (uint8_t)1U); - cswap(bit, q, basePoint); - point_add(q, basePoint, basePoint, buff); - point_double(q, q, buff); - cswap(bit, q, basePoint); - } - } - norm(q, result, buff); -} - -static void secretToPublicWithoutNorm(uint64_t *result, uint8_t *scalar, uint64_t *tempBuffer) -{ - uint64_t basePoint[12U] = { 0U }; - uint64_t *q; - uint64_t *buff; - uploadBasePoint(basePoint); - q = tempBuffer; - buff = tempBuffer + (uint32_t)12U; - zero_buffer(q); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)256U; i++) - { - uint32_t bit0 = (uint32_t)255U - i; - uint64_t - bit = - (uint64_t)(scalar[(uint32_t)31U - bit0 / (uint32_t)8U] >> bit0 % (uint32_t)8U & (uint8_t)1U); - cswap(bit, q, basePoint); - point_add(q, basePoint, basePoint, buff); - point_double(q, q, buff); - cswap(bit, q, basePoint); - } - } - copy_point(q, result); -} - -static const -uint64_t -prime256order_buffer[4U] = - { - (uint64_t)17562291160714782033U, - (uint64_t)13611842547513532036U, - (uint64_t)18446744073709551615U, - (uint64_t)18446744069414584320U - }; - -static const -uint8_t -order_inverse_buffer[32U] = - { - (uint8_t)79U, (uint8_t)37U, (uint8_t)99U, (uint8_t)252U, (uint8_t)194U, (uint8_t)202U, - (uint8_t)185U, (uint8_t)243U, (uint8_t)132U, (uint8_t)158U, (uint8_t)23U, (uint8_t)167U, - (uint8_t)173U, (uint8_t)250U, (uint8_t)230U, (uint8_t)188U, (uint8_t)255U, (uint8_t)255U, - (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, - (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, - (uint8_t)255U - }; - -static const -uint8_t -order_buffer[32U] = - { - (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)0U, (uint8_t)0U, - (uint8_t)0U, (uint8_t)0U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, - (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)188U, (uint8_t)230U, - (uint8_t)250U, (uint8_t)173U, (uint8_t)167U, (uint8_t)23U, (uint8_t)158U, (uint8_t)132U, - (uint8_t)243U, (uint8_t)185U, (uint8_t)202U, (uint8_t)194U, (uint8_t)252U, (uint8_t)99U, - (uint8_t)37U, (uint8_t)81U - }; - -static void montgomery_multiplication_round(uint64_t *t, uint64_t *round, uint64_t k0) -{ - uint64_t temp = (uint64_t)0U; - 
uint64_t y = (uint64_t)0U; - uint64_t t2[8U] = { 0U }; - uint64_t t3[8U] = { 0U }; - uint64_t t1 = t[0U]; - uint64_t y_; - uint64_t *result04; - mul64(t1, k0, &y, &temp); - y_ = y; - result04 = t2; - { - uint64_t temp1 = (uint64_t)0U; - uint64_t f1 = prime256order_buffer[1U]; - uint64_t f2 = prime256order_buffer[2U]; - uint64_t f3 = prime256order_buffer[3U]; - uint64_t *o0 = result04; - uint64_t *o1 = result04 + (uint32_t)1U; - uint64_t *o2 = result04 + (uint32_t)2U; - uint64_t *o3 = result04 + (uint32_t)3U; - uint64_t f01 = prime256order_buffer[0U]; - uint64_t h0; - uint64_t l0; - uint64_t c1; - uint64_t h1; - uint64_t l1; - uint64_t c2; - uint64_t h; - uint64_t l; - uint64_t c3; - uint64_t temp0; - uint64_t c; - uint64_t uu____0; - mul64(f01, y_, o0, &temp1); - h0 = temp1; - mul64(f1, y_, o1, &temp1); - l0 = o1[0U]; - c1 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, l0, h0, o1); - h1 = temp1; - mul64(f2, y_, o2, &temp1); - l1 = o2[0U]; - c2 = Lib_IntTypes_Intrinsics_add_carry_u64(c1, l1, h1, o2); - h = temp1; - mul64(f3, y_, o3, &temp1); - l = o3[0U]; - c3 = Lib_IntTypes_Intrinsics_add_carry_u64(c2, l, h, o3); - temp0 = temp1; - c = c3 + temp0; - t2[4U] = c; - uu____0 = add8(t, t2, t3); - shift8(t3, round); - } -} - -static void montgomery_multiplication_round_twice(uint64_t *t, uint64_t *result, uint64_t k0) -{ - uint64_t tempRound[8U] = { 0U }; - montgomery_multiplication_round(t, tempRound, k0); - montgomery_multiplication_round(tempRound, result, k0); -} - -static void reduction_prime_2prime_with_carry(uint64_t *x, uint64_t *result) -{ - uint64_t tempBuffer[4U] = { 0U }; - uint64_t tempBufferForSubborrow = (uint64_t)0U; - uint64_t cin = x[4U]; - uint64_t *x_ = x; - uint64_t c = sub4_il(x_, prime256order_buffer, tempBuffer); - uint64_t - carry = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, cin, (uint64_t)0U, &tempBufferForSubborrow); - cmovznz4(carry, tempBuffer, x_, result); -} - -static void reduction_prime_2prime_order(uint64_t *x, uint64_t *result) -{ - uint64_t tempBuffer[4U] = { 0U }; - uint64_t c = sub4_il(x, prime256order_buffer, tempBuffer); - cmovznz4(c, tempBuffer, x, result); -} - -static void montgomery_multiplication_ecdsa_module(uint64_t *a, uint64_t *b, uint64_t *result) -{ - uint64_t t[8U] = { 0U }; - uint64_t round2[8U] = { 0U }; - uint64_t round4[8U] = { 0U }; - uint64_t prime_p256_orderBuffer[4U] = { 0U }; - uint64_t k0 = (uint64_t)14758798090332847183U; - memset(t, 0U, (uint32_t)8U * sizeof (uint64_t)); - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint64_t bj = b[i0]; - uint64_t *res_j = t + i0; - uint64_t c = (uint64_t)0U; - { - uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U]; - uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0); - { - uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U]; - uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1); - { - uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U]; - uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2); - { - uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U]; - uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U; - c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i); - } - } - } - } - uint64_t r = c; - t[(uint32_t)4U + i0] = r;); - montgomery_multiplication_round_twice(t, round2, k0); - 
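montgomery_multiplication_round above targets the group order n (prime256order_buffer) rather than the field prime. n's low limb is not 2^64 - 1, so the round multiplier cannot be t[0] itself: the round first computes y = t[0] * k0 mod 2^64 with k0 = 14758798090332847183, which should be -n^-1 mod 2^64 so that t + y * n again vanishes in its low limb. A quick self-check of that relation; uint64_t multiplication wraps mod 2^64, which is exactly the arithmetic needed:

#include <assert.h>
#include <stdint.h>

int main(void)
{
  uint64_t n0 = 17562291160714782033ULL; /* prime256order_buffer[0U] */
  uint64_t k0 = 14758798090332847183ULL; /* round constant from the source */
  /* k0 * n = -1 (mod 2^64); only the low limb of n participates. */
  assert(k0 * n0 == 0xffffffffffffffffULL);
  return 0;
}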
montgomery_multiplication_round_twice(round2, round4, k0); - reduction_prime_2prime_with_carry(round4, result); -} - -static void bufferToJac(uint64_t *p, uint64_t *result) -{ - uint64_t *partPoint = result; - memcpy(partPoint, p, (uint32_t)8U * sizeof (uint64_t)); - result[8U] = (uint64_t)1U; - result[9U] = (uint64_t)0U; - result[10U] = (uint64_t)0U; - result[11U] = (uint64_t)0U; -} - -/** - The input of the function is considered to be public, -thus this code is not secret independent with respect to the operations done over the input. -*/ -static bool isPointAtInfinityPublic(uint64_t *p) -{ - uint64_t z0 = p[8U]; - uint64_t z1 = p[9U]; - uint64_t z2 = p[10U]; - uint64_t z3 = p[11U]; - bool z0_zero = z0 == (uint64_t)0U; - bool z1_zero = z1 == (uint64_t)0U; - bool z2_zero = z2 == (uint64_t)0U; - bool z3_zero = z3 == (uint64_t)0U; - return z0_zero && z1_zero && z2_zero && z3_zero; -} - -/** - The input of the function is considered to be public, -thus this code is not secret independent with respect to the operations done over the input. -*/ -static bool isPointOnCurvePublic(uint64_t *p) -{ - uint64_t y2Buffer[4U] = { 0U }; - uint64_t xBuffer[4U] = { 0U }; - uint64_t *x = p; - uint64_t *y = p + (uint32_t)4U; - uint64_t multBuffer0[8U] = { 0U }; - shift_256_impl(y, multBuffer0); - solinas_reduction_impl(multBuffer0, y2Buffer); - montgomery_square_buffer(y2Buffer, y2Buffer); - { - uint64_t xToDomainBuffer[4U] = { 0U }; - uint64_t minusThreeXBuffer[4U] = { 0U }; - uint64_t p256_constant[4U] = { 0U }; - uint64_t multBuffer[8U] = { 0U }; - uint64_t r; - shift_256_impl(x, multBuffer); - solinas_reduction_impl(multBuffer, xToDomainBuffer); - montgomery_square_buffer(xToDomainBuffer, xBuffer); - montgomery_multiplication_buffer(xBuffer, xToDomainBuffer, xBuffer); - multByThree(xToDomainBuffer, minusThreeXBuffer); - p256_sub(xBuffer, minusThreeXBuffer, xBuffer); - p256_constant[0U] = (uint64_t)15608596021259845087U; - p256_constant[1U] = (uint64_t)12461466548982526096U; - p256_constant[2U] = (uint64_t)16546823903870267094U; - p256_constant[3U] = (uint64_t)15866188208926050356U; - p256_add(xBuffer, p256_constant, xBuffer); - r = compare_felem(y2Buffer, xBuffer); - return !(r == (uint64_t)0U); - } -} - -static bool isCoordinateValid(uint64_t *p) -{ - uint64_t tempBuffer[4U] = { 0U }; - uint64_t *x = p; - uint64_t *y = p + (uint32_t)4U; - uint64_t carryX = sub4_il(x, prime256_buffer, tempBuffer); - uint64_t carryY = sub4_il(y, prime256_buffer, tempBuffer); - bool lessX = carryX == (uint64_t)1U; - bool lessY = carryY == (uint64_t)1U; - return lessX && lessY; -} - -/** - The input of the function is considered to be public, -thus this code is not secret independent with respect to the operations done over the input. -*/ -static bool isOrderCorrect(uint64_t *p, uint64_t *tempBuffer) -{ - uint64_t multResult[12U] = { 0U }; - uint64_t pBuffer[12U] = { 0U }; - bool result; - memcpy(pBuffer, p, (uint32_t)12U * sizeof (uint64_t)); - scalarMultiplicationC(pBuffer, multResult, order_buffer, tempBuffer); - result = isPointAtInfinityPublic(multResult); - return result; -} - -/** - The input of the function is considered to be public, -thus this code is not secret independent with respect to the operations done over the input. 
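verifyQValidCurvePoint, just below, combines three public checks into full validation of an untrusted point Q = (x, y): isCoordinateValid checks x < p and y < p via the borrow out of sub4_il, isPointOnCurvePublic checks the curve equation y^2 = x^3 - 3x + b (mod p) with b held in p256_constant in the Montgomery domain, and isOrderCorrect checks that n * Q is the point at infinity. Together these reject off-curve and wrong-order inputs before any secret scalar is combined with the point.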
-*/ -static bool verifyQValidCurvePoint(uint64_t *pubKeyAsPoint, uint64_t *tempBuffer) -{ - bool coordinatesValid = isCoordinateValid(pubKeyAsPoint); - if (!coordinatesValid) - { - return false; - } - { - bool belongsToCurve = isPointOnCurvePublic(pubKeyAsPoint); - bool orderCorrect = isOrderCorrect(pubKeyAsPoint, tempBuffer); - return coordinatesValid && belongsToCurve && orderCorrect; - } -} - -static bool isMoreThanZeroLessThanOrder(uint8_t *x) -{ - uint64_t xAsFelem[4U] = { 0U }; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(x, xAsFelem); - { - uint64_t tempBuffer[4U] = { 0U }; - uint64_t carry = sub4_il(xAsFelem, prime256order_buffer, tempBuffer); - uint64_t less = FStar_UInt64_eq_mask(carry, (uint64_t)1U); - uint64_t more = isZero_uint64_CT(xAsFelem); - uint64_t notMore = ~more; - uint64_t result = less & notMore; - return ~result == (uint64_t)0U; - } -} - -/** - The pub(lic)_key input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over this variable. -*/ -uint64_t Hacl_Impl_P256_DH__ecp256dh_r(uint64_t *result, uint64_t *pubKey, uint8_t *scalar) -{ - uint64_t tempBuffer[100U] = { 0U }; - uint64_t publicKeyBuffer[12U] = { 0U }; - bool publicKeyCorrect; - uint64_t ite; - bufferToJac(pubKey, publicKeyBuffer); - publicKeyCorrect = verifyQValidCurvePoint(publicKeyBuffer, tempBuffer); - if (publicKeyCorrect) - { - scalarMultiplicationL(publicKeyBuffer, result, scalar, tempBuffer); - { - uint64_t flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(result); - ite = flag; - } - } - else - { - ite = (uint64_t)18446744073709551615U; - } - return ite; -} - -static inline void cswap0(uint64_t bit, uint64_t *p1, uint64_t *p2) -{ - uint64_t mask = (uint64_t)0U - bit; - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)4U; i++) - { - uint64_t dummy = mask & (p1[i] ^ p2[i]); - p1[i] = p1[i] ^ dummy; - p2[i] = p2[i] ^ dummy; - } -} - -static void montgomery_ladder_exponent(uint64_t *r) -{ - uint64_t p[4U] = { 0U }; - p[0U] = (uint64_t)884452912994769583U; - p[1U] = (uint64_t)4834901526196019579U; - p[2U] = (uint64_t)0U; - p[3U] = (uint64_t)4294967295U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)256U; i++) - { - uint32_t bit0 = (uint32_t)255U - i; - uint64_t - bit = - (uint64_t)(order_inverse_buffer[bit0 / (uint32_t)8U] >> bit0 % (uint32_t)8U & (uint8_t)1U); - cswap0(bit, p, r); - montgomery_multiplication_ecdsa_module(p, r, r); - montgomery_multiplication_ecdsa_module(p, p, p); - cswap0(bit, p, r); - } - } - memcpy(r, p, (uint32_t)4U * sizeof (uint64_t)); -} - -static void fromDomainImpl(uint64_t *a, uint64_t *result) -{ - uint64_t one[4U] = { 0U }; - uploadOneImpl(one); - montgomery_multiplication_ecdsa_module(one, a, result); -} - -static void multPowerPartial(uint64_t *a, uint64_t *b, uint64_t *result) -{ - uint64_t buffFromDB[4U] = { 0U }; - fromDomainImpl(b, buffFromDB); - fromDomainImpl(buffFromDB, buffFromDB); - montgomery_multiplication_ecdsa_module(a, buffFromDB, result); -} - -/** - The input of the function is considered to be public, -thus this code is not secret independent with respect to the operations done over the input. 
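Concretely (editorial note), sub4_il below returns a borrow of 1 exactly when its input is below the group order n, and the four-limb zero test rejects 0, so the function accepts the open interval (0, n): f = n - 1 is accepted, while f = 0 and f = n are both rejected.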
-*/ -static bool isMoreThanZeroLessThanOrderMinusOne(uint64_t *f) -{ - uint64_t tempBuffer[4U] = { 0U }; - uint64_t carry = sub4_il(f, prime256order_buffer, tempBuffer); - bool less = carry == (uint64_t)1U; - uint64_t f0 = f[0U]; - uint64_t f1 = f[1U]; - uint64_t f2 = f[2U]; - uint64_t f3 = f[3U]; - bool z0_zero = f0 == (uint64_t)0U; - bool z1_zero = f1 == (uint64_t)0U; - bool z2_zero = f2 == (uint64_t)0U; - bool z3_zero = f3 == (uint64_t)0U; - bool more = z0_zero && z1_zero && z2_zero && z3_zero; - return less && !more; -} - -/** - The input of the function is considered to be public, -thus this code is not secret independent with respect to the operations done over the input. -*/ -static bool compare_felem_bool(uint64_t *a, uint64_t *b) -{ - uint64_t a_0 = a[0U]; - uint64_t a_1 = a[1U]; - uint64_t a_2 = a[2U]; - uint64_t a_3 = a[3U]; - uint64_t b_0 = b[0U]; - uint64_t b_1 = b[1U]; - uint64_t b_2 = b[2U]; - uint64_t b_3 = b[3U]; - return a_0 == b_0 && a_1 == b_1 && a_2 == b_2 && a_3 == b_3; -} - -/** - The input of the function is considered to be public, -thus this code is not secret independent with respect to the operations done over the input. -*/ -static bool -ecdsa_verification_( - Spec_ECDSA_hash_alg_ecdsa alg, - uint64_t *pubKey, - uint64_t *r, - uint64_t *s, - uint32_t mLen, - uint8_t *m -) -{ - uint64_t tempBufferU64[120U] = { 0U }; - uint64_t *publicKeyBuffer = tempBufferU64; - uint64_t *hashAsFelem = tempBufferU64 + (uint32_t)12U; - uint64_t *tempBuffer = tempBufferU64 + (uint32_t)16U; - uint64_t *xBuffer = tempBufferU64 + (uint32_t)116U; - bool publicKeyCorrect; - bool ite; - bufferToJac(pubKey, publicKeyBuffer); - publicKeyCorrect = verifyQValidCurvePoint(publicKeyBuffer, tempBuffer); - if (publicKeyCorrect == false) - { - ite = false; - } - else - { - bool isRCorrect = isMoreThanZeroLessThanOrderMinusOne(r); - bool isSCorrect = isMoreThanZeroLessThanOrderMinusOne(s); - bool step1 = isRCorrect && isSCorrect; - if (step1 == false) - { - ite = false; - } - else - { - uint8_t tempBufferU8[64U] = { 0U }; - uint8_t *bufferU1 = tempBufferU8; - uint8_t *bufferU2 = tempBufferU8 + (uint32_t)32U; - uint32_t sz; - if (alg.tag == Spec_ECDSA_NoHash) - { - sz = mLen; - } - else if (alg.tag == Spec_ECDSA_Hash) - { - Spec_Hash_Definitions_hash_alg a = alg._0; - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - sz = (uint32_t)16U; - break; - } - case Spec_Hash_Definitions_SHA1: - { - sz = (uint32_t)20U; - break; - } - case Spec_Hash_Definitions_SHA2_224: - { - sz = (uint32_t)28U; - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - sz = (uint32_t)32U; - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - sz = (uint32_t)48U; - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - sz = (uint32_t)64U; - break; - } - case Spec_Hash_Definitions_SHA3_256: - { - sz = (uint32_t)32U; - break; - } - case Spec_Hash_Definitions_Blake2S: - { - sz = (uint32_t)32U; - break; - } - case Spec_Hash_Definitions_Blake2B: - { - sz = (uint32_t)64U; - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - } - else - { - sz = KRML_EABORT(uint32_t, "unreachable (pattern matches are exhaustive in F*)"); - } - KRML_CHECK_SIZE(sizeof (uint8_t), sz); - { - uint8_t mHash[sz]; - memset(mHash, 0U, sz * sizeof (uint8_t)); - if (alg.tag == Spec_ECDSA_NoHash) - { - memcpy(mHash, m, sz * sizeof (uint8_t)); - } - else if (alg.tag == Spec_ECDSA_Hash) - { - Spec_Hash_Definitions_hash_alg a = alg._0; - switch (a) - { - case 
Spec_Hash_Definitions_SHA2_256: - { - Hacl_Hash_SHA2_hash_256(m, mLen, mHash); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - Hacl_Hash_SHA2_hash_384(m, mLen, mHash); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - Hacl_Hash_SHA2_hash_512(m, mLen, mHash); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - } - else - { - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); - } - { - uint8_t *cutHash = mHash; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(cutHash, hashAsFelem); - reduction_prime_2prime_order(hashAsFelem, hashAsFelem); - { - uint64_t tempBuffer1[12U] = { 0U }; - uint64_t *inverseS = tempBuffer1; - uint64_t *u1 = tempBuffer1 + (uint32_t)4U; - uint64_t *u2 = tempBuffer1 + (uint32_t)8U; - fromDomainImpl(s, inverseS); - montgomery_ladder_exponent(inverseS); - multPowerPartial(inverseS, hashAsFelem, u1); - multPowerPartial(inverseS, r, u2); - Hacl_Impl_P256_LowLevel_changeEndian(u1); - Hacl_Impl_P256_LowLevel_changeEndian(u2); - Hacl_Impl_P256_LowLevel_toUint8(u1, bufferU1); - Hacl_Impl_P256_LowLevel_toUint8(u2, bufferU2); - { - uint64_t pointSum[12U] = { 0U }; - uint64_t points[24U] = { 0U }; - uint64_t *buff = tempBuffer + (uint32_t)12U; - uint64_t *pointU1G = points; - uint64_t *pointU2Q0 = points + (uint32_t)12U; - secretToPublicWithoutNorm(pointU1G, bufferU1, tempBuffer); - scalarMultiplicationWithoutNorm(publicKeyBuffer, pointU2Q0, bufferU2, tempBuffer); - { - uint64_t *pointU1G0 = points; - uint64_t *pointU2Q = points + (uint32_t)12U; - uint64_t tmp[112U] = { 0U }; - uint64_t *tmpForNorm = tmp; - uint64_t *result0Norm = tmp + (uint32_t)88U; - uint64_t *result1Norm = tmp + (uint32_t)100U; - uint64_t *pointU1G1 = points; - uint64_t *pointU2Q1 = points + (uint32_t)12U; - norm(pointU1G1, result0Norm, tmpForNorm); - norm(pointU2Q1, result1Norm, tmpForNorm); - { - uint64_t *x0 = result0Norm; - uint64_t *y0 = result0Norm + (uint32_t)4U; - uint64_t *z0 = result0Norm + (uint32_t)8U; - uint64_t *x1 = result1Norm; - uint64_t *y1 = result1Norm + (uint32_t)4U; - uint64_t *z1 = result1Norm + (uint32_t)8U; - bool xEqual = compare_felem_bool(x0, x1); - bool yEqual = compare_felem_bool(y0, y1); - bool zEqual = compare_felem_bool(z0, z1); - bool equalX = xEqual && yEqual && zEqual; - bool equalX0 = equalX; - if (equalX0) - { - point_double(pointU1G0, pointSum, buff); - } - else - { - point_add(pointU1G0, pointU2Q, pointSum, buff); - } - norm(pointSum, pointSum, buff); - { - bool resultIsPAI = isPointAtInfinityPublic(pointSum); - uint64_t *xCoordinateSum = pointSum; - memcpy(xBuffer, xCoordinateSum, (uint32_t)4U * sizeof (uint64_t)); - reduction_prime_2prime_order(xBuffer, xBuffer); - { - bool r1 = !resultIsPAI; - bool state = r1; - if (state == false) - { - ite = false; - } - else - { - bool result = compare_felem_bool(xBuffer, r); - ite = result; - } - } - } - } - } - } - } - } - } - } - } - return ite; -} - -static uint64_t -ecdsa_signature_core( - Spec_ECDSA_hash_alg_ecdsa alg, - uint64_t *r, - uint64_t *s, - uint32_t mLen, - uint8_t *m, - uint64_t *privKeyAsFelem, - uint8_t *k -) -{ - uint64_t hashAsFelem[4U] = { 0U }; - uint64_t tempBuffer[100U] = { 0U }; - uint64_t kAsFelem[4U] = { 0U }; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(k, kAsFelem); - { - uint32_t sz; - if (alg.tag == Spec_ECDSA_NoHash) - { - sz = mLen; - } - else if (alg.tag == Spec_ECDSA_Hash) - { - 
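/* Editorial note: `sz` is the byte length of the digest that is later reduced modulo the group order: mLen itself in the NoHash case, and otherwise the output size of the selected hash algorithm (e.g. 32 for SHA2-256, 48 for SHA2-384, 64 for SHA2-512). */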
Spec_Hash_Definitions_hash_alg a = alg._0; - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - sz = (uint32_t)16U; - break; - } - case Spec_Hash_Definitions_SHA1: - { - sz = (uint32_t)20U; - break; - } - case Spec_Hash_Definitions_SHA2_224: - { - sz = (uint32_t)28U; - break; - } - case Spec_Hash_Definitions_SHA2_256: - { - sz = (uint32_t)32U; - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - sz = (uint32_t)48U; - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - sz = (uint32_t)64U; - break; - } - case Spec_Hash_Definitions_SHA3_256: - { - sz = (uint32_t)32U; - break; - } - case Spec_Hash_Definitions_Blake2S: - { - sz = (uint32_t)32U; - break; - } - case Spec_Hash_Definitions_Blake2B: - { - sz = (uint32_t)64U; - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - } - else - { - sz = KRML_EABORT(uint32_t, "unreachable (pattern matches are exhaustive in F*)"); - } - KRML_CHECK_SIZE(sizeof (uint8_t), sz); - { - uint8_t mHash[sz]; - memset(mHash, 0U, sz * sizeof (uint8_t)); - { - uint8_t *cutHash; - if (alg.tag == Spec_ECDSA_NoHash) - { - memcpy(mHash, m, sz * sizeof (uint8_t)); - } - else if (alg.tag == Spec_ECDSA_Hash) - { - Spec_Hash_Definitions_hash_alg a = alg._0; - switch (a) - { - case Spec_Hash_Definitions_SHA2_256: - { - Hacl_Hash_SHA2_hash_256(m, mLen, mHash); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - Hacl_Hash_SHA2_hash_384(m, mLen, mHash); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - Hacl_Hash_SHA2_hash_512(m, mLen, mHash); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - } - else - { - KRML_HOST_PRINTF("KaRaMeL abort at %s:%d\n%s\n", - __FILE__, - __LINE__, - "unreachable (pattern matches are exhaustive in F*)"); - KRML_HOST_EXIT(255U); - } - cutHash = mHash; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(cutHash, hashAsFelem); - reduction_prime_2prime_order(hashAsFelem, hashAsFelem); - { - uint64_t result[12U] = { 0U }; - uint64_t *tempForNorm = tempBuffer; - uint64_t step5Flag; - secretToPublicWithoutNorm(result, k, tempBuffer); - normX(result, r, tempForNorm); - reduction_prime_2prime_order(r, r); - step5Flag = isZero_uint64_CT(r); - { - uint64_t rda[4U] = { 0U }; - uint64_t zBuffer[4U] = { 0U }; - uint64_t kInv[4U] = { 0U }; - uint64_t t; - montgomery_multiplication_ecdsa_module(r, privKeyAsFelem, rda); - fromDomainImpl(hashAsFelem, zBuffer); - t = add4(rda, zBuffer, zBuffer); - { - uint64_t tempBuffer1[4U] = { 0U }; - uint64_t tempBufferForSubborrow = (uint64_t)0U; - uint64_t c = sub4_il(zBuffer, prime256order_buffer, tempBuffer1); - uint64_t - carry = - Lib_IntTypes_Intrinsics_sub_borrow_u64(c, - t, - (uint64_t)0U, - &tempBufferForSubborrow); - uint64_t sIsZero; - cmovznz4(carry, tempBuffer1, zBuffer, zBuffer); - memcpy(kInv, kAsFelem, (uint32_t)4U * sizeof (uint64_t)); - montgomery_ladder_exponent(kInv); - montgomery_multiplication_ecdsa_module(zBuffer, kInv, s); - sIsZero = isZero_uint64_CT(s); - return step5Flag | sIsZero; - } - } - } - } - } - } -} - -static inline void cswap1(uint64_t bit, uint64_t *p1, uint64_t *p2) -{ - uint64_t mask = (uint64_t)0U - bit; - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)4U; i++) - { - uint64_t dummy = mask & (p1[i] ^ p2[i]); - p1[i] = p1[i] ^ dummy; - p2[i] = p2[i] ^ dummy; - } -} - -static void montgomery_ladder_power(uint64_t *a, const uint8_t *scalar, uint64_t *result) -{ - uint64_t p[4U] = { 0U }; - p[0U] 
= (uint64_t)1U; - p[1U] = (uint64_t)18446744069414584320U; - p[2U] = (uint64_t)18446744073709551615U; - p[3U] = (uint64_t)4294967294U; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)256U; i++) - { - uint32_t bit0 = (uint32_t)255U - i; - uint64_t bit = (uint64_t)(scalar[bit0 / (uint32_t)8U] >> bit0 % (uint32_t)8U & (uint8_t)1U); - cswap1(bit, p, a); - montgomery_multiplication_buffer(p, a, a); - montgomery_square_buffer(p, p); - cswap1(bit, p, a); - } - } - memcpy(result, p, (uint32_t)4U * sizeof (uint64_t)); -} - -static const -uint8_t -sqPower_buffer[32U] = - { - (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, - (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)64U, (uint8_t)0U, (uint8_t)0U, - (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, - (uint8_t)0U, (uint8_t)0U, (uint8_t)64U, (uint8_t)0U, (uint8_t)0U, (uint8_t)0U, (uint8_t)192U, - (uint8_t)255U, (uint8_t)255U, (uint8_t)255U, (uint8_t)63U - }; - -static void computeYFromX(uint64_t *x, uint64_t *result, uint64_t sign) -{ - uint64_t aCoordinateBuffer[4U] = { 0U }; - uint64_t bCoordinateBuffer[4U] = { 0U }; - uint64_t word; - uint64_t bitToCheck; - uint64_t flag; - aCoordinateBuffer[0U] = (uint64_t)18446744073709551612U; - aCoordinateBuffer[1U] = (uint64_t)17179869183U; - aCoordinateBuffer[2U] = (uint64_t)0U; - aCoordinateBuffer[3U] = (uint64_t)18446744056529682436U; - bCoordinateBuffer[0U] = (uint64_t)15608596021259845087U; - bCoordinateBuffer[1U] = (uint64_t)12461466548982526096U; - bCoordinateBuffer[2U] = (uint64_t)16546823903870267094U; - bCoordinateBuffer[3U] = (uint64_t)15866188208926050356U; - montgomery_multiplication_buffer(aCoordinateBuffer, x, aCoordinateBuffer); - cube(x, result); - p256_add(result, aCoordinateBuffer, result); - p256_add(result, bCoordinateBuffer, result); - uploadZeroImpl(aCoordinateBuffer); - montgomery_ladder_power(result, sqPower_buffer, result); - montgomery_multiplication_buffer_by_one(result, result); - p256_sub(aCoordinateBuffer, result, bCoordinateBuffer); - word = result[0U]; - bitToCheck = word & (uint64_t)1U; - flag = FStar_UInt64_eq_mask(bitToCheck, sign); - cmovznz4(flag, bCoordinateBuffer, result, result); -} - - -/******************************************************************************* - -ECDSA and ECDH functions over the P-256 NIST curve. - -This module implements signing and verification, key validation, conversions -between various point representations, and ECDH key agreement. - -*******************************************************************************/ - -/**************/ -/* Signatures */ -/**************/ - -/* - Per the standard, a hash function *shall* be used. Therefore, we recommend - using one of the three combined hash-and-sign variants. -*/ - -/** -Hash the message with SHA2-256, then sign the resulting digest with the P256 signature function. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order.
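A minimal caller sketch (editorial addition; all buffer names are hypothetical, and sk and nonce must already hold valid secret values):

    uint8_t sig[64U];                              /* receives r || s, big-endian */
    uint8_t msg[5U] = { 'h', 'e', 'l', 'l', 'o' }; /* message, hashed internally */
    uint8_t sk[32U];                               /* private key, 0 < sk < order */
    uint8_t nonce[32U];                            /* fresh random per-signature nonce */
    bool ok = Hacl_P256_ecdsa_sign_p256_sha2(sig, (uint32_t)5U, msg, sk, nonce);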
-*/ -bool -Hacl_P256_ecdsa_sign_p256_sha2( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -) -{ - uint64_t privKeyAsFelem[4U] = { 0U }; - uint64_t r[4U] = { 0U }; - uint64_t s[4U] = { 0U }; - uint8_t *resultR = result; - uint8_t *resultS = result + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(privKey, privKeyAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - uint64_t flag; - lit.tag = Spec_ECDSA_Hash; - lit._0 = Spec_Hash_Definitions_SHA2_256; - flag = ecdsa_signature_core(lit, r, s, mLen, m, privKeyAsFelem, k); - Hacl_Impl_P256_LowLevel_changeEndian(r); - Hacl_Impl_P256_LowLevel_toUint8(r, resultR); - Hacl_Impl_P256_LowLevel_changeEndian(s); - Hacl_Impl_P256_LowLevel_toUint8(s, resultS); - return flag == (uint64_t)0U; - } -} - -/** -Hash the message with SHA2-384, then sign the resulting digest with the P256 signature function. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order. -*/ -bool -Hacl_P256_ecdsa_sign_p256_sha384( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -) -{ - uint64_t privKeyAsFelem[4U] = { 0U }; - uint64_t r[4U] = { 0U }; - uint64_t s[4U] = { 0U }; - uint8_t *resultR = result; - uint8_t *resultS = result + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(privKey, privKeyAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - uint64_t flag; - lit.tag = Spec_ECDSA_Hash; - lit._0 = Spec_Hash_Definitions_SHA2_384; - flag = ecdsa_signature_core(lit, r, s, mLen, m, privKeyAsFelem, k); - Hacl_Impl_P256_LowLevel_changeEndian(r); - Hacl_Impl_P256_LowLevel_toUint8(r, resultR); - Hacl_Impl_P256_LowLevel_changeEndian(s); - Hacl_Impl_P256_LowLevel_toUint8(s, resultS); - return flag == (uint64_t)0U; - } -} - -/** -Hash the message with SHA2-512, then sign the resulting digest with the P256 signature function. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order. -*/ -bool -Hacl_P256_ecdsa_sign_p256_sha512( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -) -{ - uint64_t privKeyAsFelem[4U] = { 0U }; - uint64_t r[4U] = { 0U }; - uint64_t s[4U] = { 0U }; - uint8_t *resultR = result; - uint8_t *resultS = result + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(privKey, privKeyAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - uint64_t flag; - lit.tag = Spec_ECDSA_Hash; - lit._0 = Spec_Hash_Definitions_SHA2_512; - flag = ecdsa_signature_core(lit, r, s, mLen, m, privKeyAsFelem, k); - Hacl_Impl_P256_LowLevel_changeEndian(r); - Hacl_Impl_P256_LowLevel_toUint8(r, resultR); - Hacl_Impl_P256_LowLevel_changeEndian(s); - Hacl_Impl_P256_LowLevel_toUint8(s, resultS); - return flag == (uint64_t)0U; - } -} - -/** -P256 signature WITHOUT hashing first. - -This function is intended to receive a hash of the input. For convenience, we -recommend using one of the hash-and-sign combined functions above. - -The argument `m` MUST be at least 32 bytes (i.e. `mLen >= 32`).
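For example (editorial sketch), a caller can hash first with the SHA2-256 helper this file already uses internally, then sign the digest; `msg`, `msgLen`, `sig`, `sk` and `nonce` are hypothetical:

    uint8_t digest[32U];
    Hacl_Hash_SHA2_hash_256(msg, msgLen, digest);
    bool ok = Hacl_P256_ecdsa_sign_p256_without_hash(sig, (uint32_t)32U, digest, sk, nonce);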
- -NOTE: The equivalent functions in OpenSSL and Fiat-Crypto both accept inputs -smaller than 32 bytes. These libraries left-pad the input with enough zeroes to -reach the minimum 32-byte size. Clients who need behavior identical to OpenSSL -need to perform the left-padding themselves. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order. - - The message m is expected to be hashed by a strong hash function, the length of the message is expected to be 32 bytes or more. -*/ -bool -Hacl_P256_ecdsa_sign_p256_without_hash( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -) -{ - uint64_t privKeyAsFelem[4U] = { 0U }; - uint64_t r[4U] = { 0U }; - uint64_t s[4U] = { 0U }; - uint8_t *resultR = result; - uint8_t *resultS = result + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(privKey, privKeyAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - uint64_t flag; - lit.tag = Spec_ECDSA_NoHash; - flag = ecdsa_signature_core(lit, r, s, mLen, m, privKeyAsFelem, k); - Hacl_Impl_P256_LowLevel_changeEndian(r); - Hacl_Impl_P256_LowLevel_toUint8(r, resultR); - Hacl_Impl_P256_LowLevel_changeEndian(s); - Hacl_Impl_P256_LowLevel_toUint8(s, resultS); - return flag == (uint64_t)0U; - } -} - - -/****************/ -/* Verification */ -/****************/ - -/* - Verify a message signature. These functions internally validate the public key using validate_public_key. -*/ - - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification. -*/ -bool -Hacl_P256_ecdsa_verif_p256_sha2( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -) -{ - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint64_t rAsFelem[4U] = { 0U }; - uint64_t sAsFelem[4U] = { 0U }; - uint8_t *pubKeyX = pubKey; - uint8_t *pubKeyY = pubKey + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(r, rAsFelem); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(s, sAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - bool result; - lit.tag = Spec_ECDSA_Hash; - lit._0 = Spec_Hash_Definitions_SHA2_256; - result = ecdsa_verification_(lit, publicKeyAsFelem, rAsFelem, sAsFelem, mLen, m); - return result; - } -} - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification.
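A minimal caller sketch (editorial addition; buffer names are hypothetical):

    uint8_t pk[64U];        /* raw public key, x || y, big-endian */
    uint8_t r[32U], s[32U]; /* signature halves, big-endian */
    bool valid = Hacl_P256_ecdsa_verif_p256_sha384(msgLen, msg, pk, r, s);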
-*/ -bool -Hacl_P256_ecdsa_verif_p256_sha384( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -) -{ - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint64_t rAsFelem[4U] = { 0U }; - uint64_t sAsFelem[4U] = { 0U }; - uint8_t *pubKeyX = pubKey; - uint8_t *pubKeyY = pubKey + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(r, rAsFelem); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(s, sAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - bool result; - lit.tag = Spec_ECDSA_Hash; - lit._0 = Spec_Hash_Definitions_SHA2_384; - result = ecdsa_verification_(lit, publicKeyAsFelem, rAsFelem, sAsFelem, mLen, m); - return result; - } -} - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification. -*/ -bool -Hacl_P256_ecdsa_verif_p256_sha512( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -) -{ - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint64_t rAsFelem[4U] = { 0U }; - uint64_t sAsFelem[4U] = { 0U }; - uint8_t *pubKeyX = pubKey; - uint8_t *pubKeyY = pubKey + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(r, rAsFelem); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(s, sAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - bool result; - lit.tag = Spec_ECDSA_Hash; - lit._0 = Spec_Hash_Definitions_SHA2_512; - result = ecdsa_verification_(lit, publicKeyAsFelem, rAsFelem, sAsFelem, mLen, m); - return result; - } -} - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification. - - The message m is expected to be hashed by a strong hash function, the length of the message is expected to be 32 bytes or more.
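For example (editorial sketch), verifying against an externally computed SHA2-256 digest mirrors the signing case above; `msg`, `msgLen`, `pk`, `r` and `s` are hypothetical:

    uint8_t digest[32U];
    Hacl_Hash_SHA2_hash_256(msg, msgLen, digest);
    bool valid = Hacl_P256_ecdsa_verif_without_hash((uint32_t)32U, digest, pk, r, s);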
-*/ -bool -Hacl_P256_ecdsa_verif_without_hash( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -) -{ - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint64_t rAsFelem[4U] = { 0U }; - uint64_t sAsFelem[4U] = { 0U }; - uint8_t *pubKeyX = pubKey; - uint8_t *pubKeyY = pubKey + (uint32_t)32U; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(r, rAsFelem); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(s, sAsFelem); - { - Spec_ECDSA_hash_alg_ecdsa lit; - bool result; - lit.tag = Spec_ECDSA_NoHash; - result = ecdsa_verification_(lit, publicKeyAsFelem, rAsFelem, sAsFelem, mLen, m); - return result; - } -} - - -/******************/ -/* Key validation */ -/******************/ - - -/** -Validate a public key. - - - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: pub(lic)Key: uint8[64]. - - Output: bool, where true stands for the public key to be correct with respect to SP 800-56A: - Verify that the public key is not the “point at infinity”, represented as O. - Verify that the affine x and y coordinates of the point represented by the public key are in the range [0, p – 1] where p is the prime defining the finite field. - Verify that y2 = x3 + ax + b where a and b are the coefficients of the curve equation. - Verify that nQ = O (the point at infinity), where n is the order of the curve and Q is the public key point. - - The last extract is taken from: https://neilmadden.blog/2017/05/17/so-how-do-you-validate-nist-ecdh-public-keys/ -*/ -bool Hacl_P256_validate_public_key(uint8_t *pubKey) -{ - uint8_t *pubKeyX = pubKey; - uint8_t *pubKeyY = pubKey + (uint32_t)32U; - uint64_t tempBuffer[120U] = { 0U }; - uint64_t *tempBufferV = tempBuffer; - uint64_t *publicKeyJ = tempBuffer + (uint32_t)100U; - uint64_t *publicKeyB = tempBuffer + (uint32_t)112U; - uint64_t *publicKeyX = publicKeyB; - uint64_t *publicKeyY = publicKeyB + (uint32_t)4U; - bool r; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyY); - bufferToJac(publicKeyB, publicKeyJ); - r = verifyQValidCurvePoint(publicKeyJ, tempBufferV); - return r; -} - -/** -Validate a private key, e.g. prior to signing. - -Input: scalar: uint8[32]. - - Output: bool, where true stands for the scalar to be more than 0 and less than order. -*/ -bool Hacl_P256_validate_private_key(uint8_t *x) -{ - return isMoreThanZeroLessThanOrder(x); -} - - -/*****************************************/ -/* Point representations and conversions */ -/*****************************************/ - -/* - Elliptic curve points have 2 32-byte coordinates (x, y) and can be represented in 3 ways: - - - "raw" form (64 bytes): the concatenation of the 2 coordinates, also known as "internal" - - "compressed" form (33 bytes): first the sign byte of y (either 0x02 or 0x03), followed by x - - "uncompressed" form (65 bytes): first a constant byte (always 0x04), followed by the "raw" form - - For all of the conversion functions below, the input and output MUST NOT overlap. -*/ - - -/** -Convert 65-byte uncompressed to raw. - -The function errors out if the first byte is incorrect, or if the resulting point is invalid.
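Concretely (editorial note), the expected input layout is:

    b[0]      = 0x04 (the uncompressed-point tag)
    b[1..32]  = x, big-endian
    b[33..64] = y, big-endian

and on success `result` receives the 64-byte x || y payload unchanged.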
- - - - Input: a point in uncompressed form (uint8[65]), - result: uint8[64] (internal point representation). - - Output: bool, where true stands for the correct decompression. - -*/ -bool Hacl_P256_uncompressed_to_raw(uint8_t *b, uint8_t *result) -{ - uint8_t compressionIdentifier = b[0U]; - bool correctIdentifier = (uint8_t)4U == compressionIdentifier; - if (correctIdentifier) - { - memcpy(result, b + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t)); - } - return correctIdentifier; -} - -/** -Convert 33-byte compressed to raw. - -The function errors out if the first byte is incorrect, or if the resulting point is invalid. - -Input: a point in compressed form (uint8[33]), - result: uint8[64] (internal point representation). - - Output: bool, where true stands for the correct decompression. - -*/ -bool Hacl_P256_compressed_to_raw(uint8_t *b, uint8_t *result) -{ - uint64_t temp[8U] = { 0U }; - uint64_t *t0 = temp; - uint64_t *t1 = temp + (uint32_t)4U; - uint8_t compressedIdentifier = b[0U]; - uint8_t correctIdentifier2 = FStar_UInt8_eq_mask((uint8_t)2U, compressedIdentifier); - uint8_t correctIdentifier3 = FStar_UInt8_eq_mask((uint8_t)3U, compressedIdentifier); - uint8_t isIdentifierCorrect = correctIdentifier2 | correctIdentifier3; - bool flag = isIdentifierCorrect == (uint8_t)255U; - if (flag) - { - uint8_t *x = b + (uint32_t)1U; - memcpy(result, x, (uint32_t)32U * sizeof (uint8_t)); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(x, t0); - { - uint64_t tempBuffer[4U] = { 0U }; - uint64_t carry = sub4_il(t0, prime256_buffer, tempBuffer); - bool lessThanPrimeXCoordinate = carry == (uint64_t)1U; - if (!lessThanPrimeXCoordinate) - { - return false; - } - { - uint64_t multBuffer[8U] = { 0U }; - shift_256_impl(t0, multBuffer); - solinas_reduction_impl(multBuffer, t0); - { - uint64_t identifierBit = (uint64_t)(compressedIdentifier & (uint8_t)1U); - computeYFromX(t0, t1, identifierBit); - Hacl_Impl_P256_LowLevel_changeEndian(t1); - Hacl_Impl_P256_LowLevel_toUint8(t1, result + (uint32_t)32U); - return true; - } - } - } - } - return false; -} - -/** -Convert raw to 65-byte uncompressed. - -This function effectively prepends a 0x04 byte. - -Input: a point buffer (internal representation: uint8[64]), - result: a point in uncompressed form (uint8[65]). -*/ -void Hacl_P256_raw_to_uncompressed(uint8_t *b, uint8_t *result) -{ - uint8_t *to = result + (uint32_t)1U; - memcpy(to, b, (uint32_t)64U * sizeof (uint8_t)); - result[0U] = (uint8_t)4U; -} - -/** -Convert raw to 33-byte compressed. - - Input: `b`, the point buffer in internal representation, of type `uint8[64]` - Output: `result`, a point in compressed form, of type `uint8[33]` - -*/ -void Hacl_P256_raw_to_compressed(uint8_t *b, uint8_t *result) -{ - uint8_t *y = b + (uint32_t)32U; - uint8_t lastWordY = y[31U]; - uint8_t lastBitY = lastWordY & (uint8_t)1U; - uint8_t identifier = lastBitY + (uint8_t)2U; - memcpy(result + (uint32_t)1U, b, (uint32_t)32U * sizeof (uint8_t)); - result[0U] = identifier; -} - - -/******************/ -/* ECDH agreement */ -/******************/ - -/** -Convert a private key into a raw public key. - -This function performs no key validation. - - Input: `scalar`, the private key, of type `uint8[32]`. - Output: `result`, the public key, of type `uint8[64]`. - Returns: - - `true`, for success, meaning the public key is not a point at infinity - - `false`, otherwise. - - `scalar` and `result` MUST NOT overlap.
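A minimal two-party sketch (editorial addition, together with Hacl_P256_dh_responder below; sk_a and sk_b are hypothetical names for validated random scalars):

    uint8_t pk_a[64U], pk_b[64U], ss_a[64U], ss_b[64U];
    bool ok = Hacl_P256_dh_initiator(pk_a, sk_a)       /* A's public key */
           && Hacl_P256_dh_initiator(pk_b, sk_b)       /* B's public key */
           && Hacl_P256_dh_responder(ss_a, pk_b, sk_a) /* A's view of the secret */
           && Hacl_P256_dh_responder(ss_b, pk_a, sk_b);/* B's view of the secret */
    /* On success ss_a and ss_b hold the same affine point; applications
       typically use only the x-coordinate, i.e. the first 32 bytes. */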
-*/ -bool Hacl_P256_dh_initiator(uint8_t *result, uint8_t *scalar) -{ - uint64_t tempBuffer[100U] = { 0U }; - uint64_t resultBuffer[12U] = { 0U }; - uint64_t *resultBufferX = resultBuffer; - uint64_t *resultBufferY = resultBuffer + (uint32_t)4U; - uint8_t *resultX = result; - uint8_t *resultY = result + (uint32_t)32U; - uint64_t flag; - Hacl_Impl_P256_Core_secretToPublic(resultBuffer, scalar, tempBuffer); - flag = Hacl_Impl_P256_Core_isPointAtInfinityPrivate(resultBuffer); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferY, resultY); - return flag == (uint64_t)0U; -} - -/** -ECDH key agreement. - -This function takes a 32-byte secret key, another party's 64-byte raw public -key, and computes the 64-byte ECDH shared key. - -This function ONLY validates the public key. - - The pub(lic)_key input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over this variable. - - Input: result: uint8[64], - pub(lic)Key: uint8[64], - scalar: uint8[32]. - - Output: bool, where True stands for the correct key generation. A False value means that an error has occurred (possibly the provided public key was incorrect or the result represents the point at infinity). - -*/ -bool Hacl_P256_dh_responder(uint8_t *result, uint8_t *pubKey, uint8_t *scalar) -{ - uint64_t resultBufferFelem[12U] = { 0U }; - uint64_t *resultBufferFelemX = resultBufferFelem; - uint64_t *resultBufferFelemY = resultBufferFelem + (uint32_t)4U; - uint8_t *resultX = result; - uint8_t *resultY = result + (uint32_t)32U; - uint64_t publicKeyAsFelem[8U] = { 0U }; - uint64_t *publicKeyFelemX = publicKeyAsFelem; - uint64_t *publicKeyFelemY = publicKeyAsFelem + (uint32_t)4U; - uint8_t *pubKeyX = pubKey; - uint8_t *pubKeyY = pubKey + (uint32_t)32U; - uint64_t flag; - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyX, publicKeyFelemX); - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(pubKeyY, publicKeyFelemY); - flag = Hacl_Impl_P256_DH__ecp256dh_r(resultBufferFelem, publicKeyAsFelem, scalar); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemX); - Hacl_Impl_P256_LowLevel_changeEndian(resultBufferFelemY); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemX, resultX); - Hacl_Impl_P256_LowLevel_toUint8(resultBufferFelemY, resultY); - return flag == (uint64_t)0U; -} - diff --git a/dist/c89-compatible/Hacl_P256.h b/dist/c89-compatible/Hacl_P256.h deleted file mode 100644 index 55f14fa344..0000000000 --- a/dist/c89-compatible/Hacl_P256.h +++ /dev/null @@ -1,393 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_P256_H -#define __Hacl_P256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Spec.h" -#include "Hacl_Krmllib.h" -#include "Hacl_Hash_SHA2.h" -#include "Hacl_Bignum_Base.h" -#include "evercrypt_targetconfig.h" -#include "lib_intrinsics.h" - -/******************************************************************************* - -ECDSA and ECDH functions over the P-256 NIST curve. - -This module implements signing and verification, key validation, conversions -between various point representations, and ECDH key agreement. - -*******************************************************************************/ - -/**************/ -/* Signatures */ -/**************/ - -/* - Per the standard, a hash function *shall* be used. Therefore, we recommend - using one of the three combined hash-and-sign variants. -*/ - -/** -Hash the message with SHA2-256, then sign the resulting digest with the P256 signature function. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order. -*/ -bool -Hacl_P256_ecdsa_sign_p256_sha2( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -); - -/** -Hash the message with SHA2-384, then sign the resulting digest with the P256 signature function. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order. -*/ -bool -Hacl_P256_ecdsa_sign_p256_sha384( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -); - -/** -Hash the message with SHA2-512, then sign the resulting digest with the P256 signature function. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order. -*/ -bool -Hacl_P256_ecdsa_sign_p256_sha512( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -); - -/** -P256 signature WITHOUT hashing first. - -This function is intended to receive a hash of the input. For convenience, we -recommend using one of the hash-and-sign combined functions above. - -The argument `m` MUST be at least 32 bytes (i.e. `mLen >= 32`). - -NOTE: The equivalent functions in OpenSSL and Fiat-Crypto both accept inputs -smaller than 32 bytes.
These libraries left-pad the input with enough zeroes to -reach the minimum 32-byte size. Clients who need behavior identical to OpenSSL -need to perform the left-padding themselves. - -Input: result buffer: uint8[64], - m buffer: uint8 [mLen], - priv(ate)Key: uint8[32], - k (nonce): uint8[32]. - - Output: bool, where True stands for the correct signature generation. A False value means that an error has occurred. - - The private key and the nonce are expected to be more than 0 and less than the curve order. - - The message m is expected to be hashed by a strong hash function, the length of the message is expected to be 32 bytes or more. -*/ -bool -Hacl_P256_ecdsa_sign_p256_without_hash( - uint8_t *result, - uint32_t mLen, - uint8_t *m, - uint8_t *privKey, - uint8_t *k -); - - -/****************/ -/* Verification */ -/****************/ - -/* - Verify a message signature. These functions internally validate the public key using validate_public_key. -*/ - - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification. -*/ -bool -Hacl_P256_ecdsa_verif_p256_sha2( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -); - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification. -*/ -bool -Hacl_P256_ecdsa_verif_p256_sha384( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -); - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification. -*/ -bool -Hacl_P256_ecdsa_verif_p256_sha512( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -); - -/** - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: m buffer: uint8 [mLen], - pub(lic)Key: uint8[64], - r: uint8[32], - s: uint8[32]. - - Output: bool, where true stands for the correct signature verification. - - The message m is expected to be hashed by a strong hash function, the length of the message is expected to be 32 bytes or more. -*/ -bool -Hacl_P256_ecdsa_verif_without_hash( - uint32_t mLen, - uint8_t *m, - uint8_t *pubKey, - uint8_t *r, - uint8_t *s -); - - -/******************/ -/* Key validation */ -/******************/ - - -/** -Validate a public key. - - - The input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over the input. - - Input: pub(lic)Key: uint8[64]. - - Output: bool, where true stands for the public key to be correct with respect to SP 800-56A: - Verify that the public key is not the “point at infinity”, represented as O. - Verify that the affine x and y coordinates of the point represented by the public key are in the range [0, p – 1] where p is the prime defining the finite field.
- Verify that y2 = x3 + ax + b where a and b are the coefficients of the curve equation. - Verify that nQ = O (the point at infinity), where n is the order of the curve and Q is the public key point. - - The last extract is taken from: https://neilmadden.blog/2017/05/17/so-how-do-you-validate-nist-ecdh-public-keys/ -*/ -bool Hacl_P256_validate_public_key(uint8_t *pubKey); - -/** -Validate a private key, e.g. prior to signing. - -Input: scalar: uint8[32]. - - Output: bool, where true stands for the scalar to be more than 0 and less than order. -*/ -bool Hacl_P256_validate_private_key(uint8_t *x); - - -/*****************************************/ -/* Point representations and conversions */ -/*****************************************/ - -/* - Elliptic curve points have 2 32-byte coordinates (x, y) and can be represented in 3 ways: - - - "raw" form (64 bytes): the concatenation of the 2 coordinates, also known as "internal" - - "compressed" form (33 bytes): first the sign byte of y (either 0x02 or 0x03), followed by x - - "uncompressed" form (65 bytes): first a constant byte (always 0x04), followed by the "raw" form - - For all of the conversion functions below, the input and output MUST NOT overlap. -*/ - - -/** -Convert 65-byte uncompressed to raw. - -The function errors out if the first byte is incorrect, or if the resulting point is invalid. - - - - Input: a point in uncompressed form (uint8[65]), - result: uint8[64] (internal point representation). - - Output: bool, where true stands for the correct decompression. - -*/ -bool Hacl_P256_uncompressed_to_raw(uint8_t *b, uint8_t *result); - -/** -Convert 33-byte compressed to raw. - -The function errors out if the first byte is incorrect, or if the resulting point is invalid. - -Input: a point in compressed form (uint8[33]), - result: uint8[64] (internal point representation). - - Output: bool, where true stands for the correct decompression. - -*/ -bool Hacl_P256_compressed_to_raw(uint8_t *b, uint8_t *result); - -/** -Convert raw to 65-byte uncompressed. - -This function effectively prepends a 0x04 byte. - -Input: a point buffer (internal representation: uint8[64]), - result: a point in uncompressed form (uint8[65]). -*/ -void Hacl_P256_raw_to_uncompressed(uint8_t *b, uint8_t *result); - -/** -Convert raw to 33-byte compressed. - - Input: `b`, the point buffer in internal representation, of type `uint8[64]` - Output: `result`, a point in compressed form, of type `uint8[33]` - -*/ -void Hacl_P256_raw_to_compressed(uint8_t *b, uint8_t *result); - - -/******************/ -/* ECDH agreement */ -/******************/ - -/** -Convert a private key into a raw public key. - -This function performs no key validation. - - Input: `scalar`, the private key, of type `uint8[32]`. - Output: `result`, the public key, of type `uint8[64]`. - Returns: - - `true`, for success, meaning the public key is not a point at infinity - - `false`, otherwise. - - `scalar` and `result` MUST NOT overlap. -*/ -bool Hacl_P256_dh_initiator(uint8_t *result, uint8_t *scalar); - -/** -ECDH key agreement. - -This function takes a 32-byte secret key, another party's 64-byte raw public -key, and computes the 64-byte ECDH shared key. - -This function ONLY validates the public key. - - The pub(lic)_key input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over this variable. - - Input: result: uint8[64], - pub(lic)Key: uint8[64], - scalar: uint8[32].
- - Output: bool, where True stands for the correct key generation. A False value means that an error has occurred (possibly the provided public key was incorrect or the result represents the point at infinity). - -*/ -bool Hacl_P256_dh_responder(uint8_t *result, uint8_t *pubKey, uint8_t *scalar); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_P256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Poly1305_128.c b/dist/c89-compatible/Hacl_Poly1305_128.c deleted file mode 100644 index 00dbb42750..0000000000 --- a/dist/c89-compatible/Hacl_Poly1305_128.c +++ /dev/null @@ -1,1900 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "internal/Hacl_Poly1305_128.h" - - - -void -Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b) -{ - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - Lib_IntVector_Intrinsics_vec128 b10 = Lib_IntVector_Intrinsics_vec128_load64_le(b); - Lib_IntVector_Intrinsics_vec128 - b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b10, b2); - Lib_IntVector_Intrinsics_vec128 - hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b10, b2); - Lib_IntVector_Intrinsics_vec128 - f00 = - Lib_IntVector_Intrinsics_vec128_and(lo, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f10 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f02 = f00; - Lib_IntVector_Intrinsics_vec128 f12 = f10; - Lib_IntVector_Intrinsics_vec128 f22 = f20; - Lib_IntVector_Intrinsics_vec128 f32 = f30; - Lib_IntVector_Intrinsics_vec128 f42 = f40; - uint64_t b1; - Lib_IntVector_Intrinsics_vec128 mask; - Lib_IntVector_Intrinsics_vec128 f43; - Lib_IntVector_Intrinsics_vec128 acc0; - Lib_IntVector_Intrinsics_vec128 acc1; - Lib_IntVector_Intrinsics_vec128 acc2; - Lib_IntVector_Intrinsics_vec128 acc3; - Lib_IntVector_Intrinsics_vec128 acc4; - Lib_IntVector_Intrinsics_vec128 e0; - Lib_IntVector_Intrinsics_vec128 e1; - Lib_IntVector_Intrinsics_vec128 e2; - Lib_IntVector_Intrinsics_vec128 e3; - Lib_IntVector_Intrinsics_vec128 e4; - Lib_IntVector_Intrinsics_vec128 f0; - Lib_IntVector_Intrinsics_vec128 f1; - Lib_IntVector_Intrinsics_vec128 f2; - Lib_IntVector_Intrinsics_vec128 f3; - Lib_IntVector_Intrinsics_vec128 f4; - Lib_IntVector_Intrinsics_vec128 f01; - Lib_IntVector_Intrinsics_vec128 f11; - Lib_IntVector_Intrinsics_vec128 f21; - Lib_IntVector_Intrinsics_vec128 f31; - Lib_IntVector_Intrinsics_vec128 f41; - Lib_IntVector_Intrinsics_vec128 acc01; - Lib_IntVector_Intrinsics_vec128 acc11; - Lib_IntVector_Intrinsics_vec128 acc21; - Lib_IntVector_Intrinsics_vec128 acc31; - Lib_IntVector_Intrinsics_vec128 acc41; - e[0U] = f02; - e[1U] = f12; - e[2U] = f22; - e[3U] = f32; - e[4U] = f42; - b1 = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec128_load64(b1); - f43 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f43, mask); - acc0 = acc[0U]; - acc1 = acc[1U]; - acc2 = acc[2U]; - acc3 = acc[3U]; - acc4 = acc[4U]; - e0 = e[0U]; - e1 = e[1U]; - e2 = e[2U]; - e3 = e[3U]; - e4 = e[4U]; - f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, (uint64_t)0U, (uint32_t)1U); - f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, (uint64_t)0U, (uint32_t)1U); - f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, 
(uint64_t)0U, (uint32_t)1U); - f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, (uint64_t)0U, (uint32_t)1U); - f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, (uint64_t)0U, (uint32_t)1U); - f01 = Lib_IntVector_Intrinsics_vec128_add64(f0, e0); - f11 = Lib_IntVector_Intrinsics_vec128_add64(f1, e1); - f21 = Lib_IntVector_Intrinsics_vec128_add64(f2, e2); - f31 = Lib_IntVector_Intrinsics_vec128_add64(f3, e3); - f41 = Lib_IntVector_Intrinsics_vec128_add64(f4, e4); - acc01 = f01; - acc11 = f11; - acc21 = f21; - acc31 = f31; - acc41 = f41; - acc[0U] = acc01; - acc[1U] = acc11; - acc[2U] = acc21; - acc[3U] = acc31; - acc[4U] = acc41; -} - -void -Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize( - Lib_IntVector_Intrinsics_vec128 *out, - Lib_IntVector_Intrinsics_vec128 *p -) -{ - Lib_IntVector_Intrinsics_vec128 *r = p; - Lib_IntVector_Intrinsics_vec128 *r2 = p + (uint32_t)10U; - Lib_IntVector_Intrinsics_vec128 a0 = out[0U]; - Lib_IntVector_Intrinsics_vec128 a1 = out[1U]; - Lib_IntVector_Intrinsics_vec128 a2 = out[2U]; - Lib_IntVector_Intrinsics_vec128 a3 = out[3U]; - Lib_IntVector_Intrinsics_vec128 a4 = out[4U]; - Lib_IntVector_Intrinsics_vec128 r10 = r[0U]; - Lib_IntVector_Intrinsics_vec128 r11 = r[1U]; - Lib_IntVector_Intrinsics_vec128 r12 = r[2U]; - Lib_IntVector_Intrinsics_vec128 r13 = r[3U]; - Lib_IntVector_Intrinsics_vec128 r14 = r[4U]; - Lib_IntVector_Intrinsics_vec128 r20 = r2[0U]; - Lib_IntVector_Intrinsics_vec128 r21 = r2[1U]; - Lib_IntVector_Intrinsics_vec128 r22 = r2[2U]; - Lib_IntVector_Intrinsics_vec128 r23 = r2[3U]; - Lib_IntVector_Intrinsics_vec128 r24 = r2[4U]; - Lib_IntVector_Intrinsics_vec128 - r201 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r20, r10); - Lib_IntVector_Intrinsics_vec128 - r211 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r21, r11); - Lib_IntVector_Intrinsics_vec128 - r221 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r22, r12); - Lib_IntVector_Intrinsics_vec128 - r231 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r23, r13); - Lib_IntVector_Intrinsics_vec128 - r241 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r24, r14); - Lib_IntVector_Intrinsics_vec128 - r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec128 - r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec128 - r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec128 - r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_mul64(r201, a0); - Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_mul64(r211, a0); - Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_mul64(r221, a0); - Lib_IntVector_Intrinsics_vec128 a31 = Lib_IntVector_Intrinsics_vec128_mul64(r231, a0); - Lib_IntVector_Intrinsics_vec128 a41 = Lib_IntVector_Intrinsics_vec128_mul64(r241, a0); - Lib_IntVector_Intrinsics_vec128 - a02 = - Lib_IntVector_Intrinsics_vec128_add64(a01, - Lib_IntVector_Intrinsics_vec128_mul64(r254, a1)); - Lib_IntVector_Intrinsics_vec128 - a12 = - Lib_IntVector_Intrinsics_vec128_add64(a11, - Lib_IntVector_Intrinsics_vec128_mul64(r201, a1)); - Lib_IntVector_Intrinsics_vec128 - a22 = - Lib_IntVector_Intrinsics_vec128_add64(a21, - Lib_IntVector_Intrinsics_vec128_mul64(r211, a1)); - Lib_IntVector_Intrinsics_vec128 - a32 = - Lib_IntVector_Intrinsics_vec128_add64(a31, - Lib_IntVector_Intrinsics_vec128_mul64(r221, a1)); - 
Lib_IntVector_Intrinsics_vec128 - a42 = - Lib_IntVector_Intrinsics_vec128_add64(a41, - Lib_IntVector_Intrinsics_vec128_mul64(r231, a1)); - Lib_IntVector_Intrinsics_vec128 - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r253, a2)); - Lib_IntVector_Intrinsics_vec128 - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r254, a2)); - Lib_IntVector_Intrinsics_vec128 - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r201, a2)); - Lib_IntVector_Intrinsics_vec128 - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r211, a2)); - Lib_IntVector_Intrinsics_vec128 - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r221, a2)); - Lib_IntVector_Intrinsics_vec128 - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r252, a3)); - Lib_IntVector_Intrinsics_vec128 - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r253, a3)); - Lib_IntVector_Intrinsics_vec128 - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r254, a3)); - Lib_IntVector_Intrinsics_vec128 - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r201, a3)); - Lib_IntVector_Intrinsics_vec128 - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r211, a3)); - Lib_IntVector_Intrinsics_vec128 - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r251, a4)); - Lib_IntVector_Intrinsics_vec128 - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r252, a4)); - Lib_IntVector_Intrinsics_vec128 - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r253, a4)); - Lib_IntVector_Intrinsics_vec128 - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r254, a4)); - Lib_IntVector_Intrinsics_vec128 - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r201, a4)); - Lib_IntVector_Intrinsics_vec128 t0 = a05; - Lib_IntVector_Intrinsics_vec128 t1 = a15; - Lib_IntVector_Intrinsics_vec128 t2 = a25; - Lib_IntVector_Intrinsics_vec128 t3 = a35; - Lib_IntVector_Intrinsics_vec128 t4 = a45; - Lib_IntVector_Intrinsics_vec128 - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec128 - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26); - Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0); - Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - Lib_IntVector_Intrinsics_vec128 - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - Lib_IntVector_Intrinsics_vec128 
x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - Lib_IntVector_Intrinsics_vec128 - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - Lib_IntVector_Intrinsics_vec128 - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - Lib_IntVector_Intrinsics_vec128 o0 = x02; - Lib_IntVector_Intrinsics_vec128 o10 = x12; - Lib_IntVector_Intrinsics_vec128 o20 = x21; - Lib_IntVector_Intrinsics_vec128 o30 = x32; - Lib_IntVector_Intrinsics_vec128 o40 = x42; - Lib_IntVector_Intrinsics_vec128 - o01 = - Lib_IntVector_Intrinsics_vec128_add64(o0, - Lib_IntVector_Intrinsics_vec128_interleave_high64(o0, o0)); - Lib_IntVector_Intrinsics_vec128 - o11 = - Lib_IntVector_Intrinsics_vec128_add64(o10, - Lib_IntVector_Intrinsics_vec128_interleave_high64(o10, o10)); - Lib_IntVector_Intrinsics_vec128 - o21 = - Lib_IntVector_Intrinsics_vec128_add64(o20, - Lib_IntVector_Intrinsics_vec128_interleave_high64(o20, o20)); - Lib_IntVector_Intrinsics_vec128 - o31 = - Lib_IntVector_Intrinsics_vec128_add64(o30, - Lib_IntVector_Intrinsics_vec128_interleave_high64(o30, o30)); - Lib_IntVector_Intrinsics_vec128 - o41 = - Lib_IntVector_Intrinsics_vec128_add64(o40, - Lib_IntVector_Intrinsics_vec128_interleave_high64(o40, o40)); - Lib_IntVector_Intrinsics_vec128 - l = Lib_IntVector_Intrinsics_vec128_add64(o01, Lib_IntVector_Intrinsics_vec128_zero); - Lib_IntVector_Intrinsics_vec128 - tmp0 = - Lib_IntVector_Intrinsics_vec128_and(l, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0); - Lib_IntVector_Intrinsics_vec128 - tmp1 = - Lib_IntVector_Intrinsics_vec128_and(l0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1); - Lib_IntVector_Intrinsics_vec128 - tmp2 = - Lib_IntVector_Intrinsics_vec128_and(l1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2); - Lib_IntVector_Intrinsics_vec128 - tmp3 = - Lib_IntVector_Intrinsics_vec128_and(l2, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 
(uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3); - Lib_IntVector_Intrinsics_vec128 - tmp4 = - Lib_IntVector_Intrinsics_vec128_and(l3, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - o00 = - Lib_IntVector_Intrinsics_vec128_add64(tmp0, - Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U)); - Lib_IntVector_Intrinsics_vec128 o1 = tmp1; - Lib_IntVector_Intrinsics_vec128 o2 = tmp2; - Lib_IntVector_Intrinsics_vec128 o3 = tmp3; - Lib_IntVector_Intrinsics_vec128 o4 = tmp4; - out[0U] = o00; - out[1U] = o1; - out[2U] = o2; - out[3U] = o3; - out[4U] = o4; -} - -void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key) -{ - Lib_IntVector_Intrinsics_vec128 *acc = ctx; - Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U; - uint8_t *kr = key; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - uint64_t mask0; - uint64_t mask1; - uint64_t lo1; - uint64_t hi1; - Lib_IntVector_Intrinsics_vec128 *r; - Lib_IntVector_Intrinsics_vec128 *r5; - Lib_IntVector_Intrinsics_vec128 *rn; - Lib_IntVector_Intrinsics_vec128 *rn_5; - Lib_IntVector_Intrinsics_vec128 r_vec0; - Lib_IntVector_Intrinsics_vec128 r_vec1; - Lib_IntVector_Intrinsics_vec128 f00; - Lib_IntVector_Intrinsics_vec128 f15; - Lib_IntVector_Intrinsics_vec128 f25; - Lib_IntVector_Intrinsics_vec128 f30; - Lib_IntVector_Intrinsics_vec128 f40; - Lib_IntVector_Intrinsics_vec128 f0; - Lib_IntVector_Intrinsics_vec128 f1; - Lib_IntVector_Intrinsics_vec128 f2; - Lib_IntVector_Intrinsics_vec128 f3; - Lib_IntVector_Intrinsics_vec128 f4; - Lib_IntVector_Intrinsics_vec128 f200; - Lib_IntVector_Intrinsics_vec128 f210; - Lib_IntVector_Intrinsics_vec128 f220; - Lib_IntVector_Intrinsics_vec128 f230; - Lib_IntVector_Intrinsics_vec128 f240; - Lib_IntVector_Intrinsics_vec128 r0; - Lib_IntVector_Intrinsics_vec128 r1; - Lib_IntVector_Intrinsics_vec128 r2; - Lib_IntVector_Intrinsics_vec128 r3; - Lib_IntVector_Intrinsics_vec128 r4; - Lib_IntVector_Intrinsics_vec128 r51; - Lib_IntVector_Intrinsics_vec128 r52; - Lib_IntVector_Intrinsics_vec128 r53; - Lib_IntVector_Intrinsics_vec128 r54; - Lib_IntVector_Intrinsics_vec128 f10; - Lib_IntVector_Intrinsics_vec128 f11; - Lib_IntVector_Intrinsics_vec128 f12; - Lib_IntVector_Intrinsics_vec128 f13; - Lib_IntVector_Intrinsics_vec128 f14; - Lib_IntVector_Intrinsics_vec128 a0; - Lib_IntVector_Intrinsics_vec128 a1; - Lib_IntVector_Intrinsics_vec128 a2; - Lib_IntVector_Intrinsics_vec128 a3; - Lib_IntVector_Intrinsics_vec128 a4; - Lib_IntVector_Intrinsics_vec128 a01; - Lib_IntVector_Intrinsics_vec128 a11; - Lib_IntVector_Intrinsics_vec128 a21; - Lib_IntVector_Intrinsics_vec128 a31; - Lib_IntVector_Intrinsics_vec128 a41; - Lib_IntVector_Intrinsics_vec128 a02; - Lib_IntVector_Intrinsics_vec128 a12; - Lib_IntVector_Intrinsics_vec128 a22; - Lib_IntVector_Intrinsics_vec128 a32; - Lib_IntVector_Intrinsics_vec128 a42; - Lib_IntVector_Intrinsics_vec128 a03; - Lib_IntVector_Intrinsics_vec128 a13; - Lib_IntVector_Intrinsics_vec128 a23; - Lib_IntVector_Intrinsics_vec128 a33; - Lib_IntVector_Intrinsics_vec128 a43; - Lib_IntVector_Intrinsics_vec128 a04; - Lib_IntVector_Intrinsics_vec128 a14; - Lib_IntVector_Intrinsics_vec128 a24; - Lib_IntVector_Intrinsics_vec128 a34; - Lib_IntVector_Intrinsics_vec128 a44; - Lib_IntVector_Intrinsics_vec128 t0; - Lib_IntVector_Intrinsics_vec128 t1; - 
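/* Every local is declared at the top of the enclosing block, before any
   statement runs; the generated code keeps to this hoisted style so that
   it also compiles under strict pre-C99 (C89) compilers, which reject
   declarations placed after the first statement of a block. */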
Lib_IntVector_Intrinsics_vec128 t2; - Lib_IntVector_Intrinsics_vec128 t3; - Lib_IntVector_Intrinsics_vec128 t4; - Lib_IntVector_Intrinsics_vec128 mask26; - Lib_IntVector_Intrinsics_vec128 z0; - Lib_IntVector_Intrinsics_vec128 z1; - Lib_IntVector_Intrinsics_vec128 x0; - Lib_IntVector_Intrinsics_vec128 x3; - Lib_IntVector_Intrinsics_vec128 x1; - Lib_IntVector_Intrinsics_vec128 x4; - Lib_IntVector_Intrinsics_vec128 z01; - Lib_IntVector_Intrinsics_vec128 z11; - Lib_IntVector_Intrinsics_vec128 t; - Lib_IntVector_Intrinsics_vec128 z12; - Lib_IntVector_Intrinsics_vec128 x11; - Lib_IntVector_Intrinsics_vec128 x41; - Lib_IntVector_Intrinsics_vec128 x2; - Lib_IntVector_Intrinsics_vec128 x01; - Lib_IntVector_Intrinsics_vec128 z02; - Lib_IntVector_Intrinsics_vec128 z13; - Lib_IntVector_Intrinsics_vec128 x21; - Lib_IntVector_Intrinsics_vec128 x02; - Lib_IntVector_Intrinsics_vec128 x31; - Lib_IntVector_Intrinsics_vec128 x12; - Lib_IntVector_Intrinsics_vec128 z03; - Lib_IntVector_Intrinsics_vec128 x32; - Lib_IntVector_Intrinsics_vec128 x42; - Lib_IntVector_Intrinsics_vec128 o0; - Lib_IntVector_Intrinsics_vec128 o1; - Lib_IntVector_Intrinsics_vec128 o2; - Lib_IntVector_Intrinsics_vec128 o3; - Lib_IntVector_Intrinsics_vec128 o4; - Lib_IntVector_Intrinsics_vec128 f20; - Lib_IntVector_Intrinsics_vec128 f21; - Lib_IntVector_Intrinsics_vec128 f22; - Lib_IntVector_Intrinsics_vec128 f23; - Lib_IntVector_Intrinsics_vec128 f24; - acc[0U] = Lib_IntVector_Intrinsics_vec128_zero; - acc[1U] = Lib_IntVector_Intrinsics_vec128_zero; - acc[2U] = Lib_IntVector_Intrinsics_vec128_zero; - acc[3U] = Lib_IntVector_Intrinsics_vec128_zero; - acc[4U] = Lib_IntVector_Intrinsics_vec128_zero; - u0 = load64_le(kr); - lo = u0; - u = load64_le(kr + (uint32_t)8U); - hi = u; - mask0 = (uint64_t)0x0ffffffc0fffffffU; - mask1 = (uint64_t)0x0ffffffc0ffffffcU; - lo1 = lo & mask0; - hi1 = hi & mask1; - r = pre; - r5 = pre + (uint32_t)5U; - rn = pre + (uint32_t)10U; - rn_5 = pre + (uint32_t)15U; - r_vec0 = Lib_IntVector_Intrinsics_vec128_load64(lo1); - r_vec1 = Lib_IntVector_Intrinsics_vec128_load64(hi1); - f00 = - Lib_IntVector_Intrinsics_vec128_and(r_vec0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - f15 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - f25 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(r_vec1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, (uint32_t)40U); - f0 = f00; - f1 = f15; - f2 = f25; - f3 = f30; - f4 = f40; - r[0U] = f0; - r[1U] = f1; - r[2U] = f2; - r[3U] = f3; - r[4U] = f4; - f200 = r[0U]; - f210 = r[1U]; - f220 = r[2U]; - f230 = r[3U]; - f240 = r[4U]; - r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, (uint64_t)5U); - r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, (uint64_t)5U); - r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, (uint64_t)5U); - r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, (uint64_t)5U); - r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, (uint64_t)5U); - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = 
r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = r[0U]; - f11 = r[1U]; - f12 = r[2U]; - f13 = r[3U]; - f14 = r[4U]; - a0 = Lib_IntVector_Intrinsics_vec128_mul64(r0, f10); - a1 = Lib_IntVector_Intrinsics_vec128_mul64(r1, f10); - a2 = Lib_IntVector_Intrinsics_vec128_mul64(r2, f10); - a3 = Lib_IntVector_Intrinsics_vec128_mul64(r3, f10); - a4 = Lib_IntVector_Intrinsics_vec128_mul64(r4, f10); - a01 = - Lib_IntVector_Intrinsics_vec128_add64(a0, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f11)); - a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, Lib_IntVector_Intrinsics_vec128_mul64(r0, f11)); - a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, Lib_IntVector_Intrinsics_vec128_mul64(r1, f11)); - a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, Lib_IntVector_Intrinsics_vec128_mul64(r2, f11)); - a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, Lib_IntVector_Intrinsics_vec128_mul64(r3, f11)); - a02 = - Lib_IntVector_Intrinsics_vec128_add64(a01, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f12)); - a12 = - Lib_IntVector_Intrinsics_vec128_add64(a11, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f12)); - a22 = - Lib_IntVector_Intrinsics_vec128_add64(a21, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f12)); - a32 = - Lib_IntVector_Intrinsics_vec128_add64(a31, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f12)); - a42 = - Lib_IntVector_Intrinsics_vec128_add64(a41, - Lib_IntVector_Intrinsics_vec128_mul64(r2, f12)); - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r52, f13)); - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f13)); - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f13)); - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f13)); - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f13)); - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r51, f14)); - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r52, f14)); - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f14)); - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f14)); - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f14)); - t0 = a04; - t1 = a14; - t2 = a24; - t3 = a34; - t4 = a44; - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - z02 = 
Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - rn[0U] = o0; - rn[1U] = o1; - rn[2U] = o2; - rn[3U] = o3; - rn[4U] = o4; - f20 = rn[0U]; - f21 = rn[1U]; - f22 = rn[2U]; - f23 = rn[3U]; - f24 = rn[4U]; - rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f20, (uint64_t)5U); - rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, (uint64_t)5U); - rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, (uint64_t)5U); - rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, (uint64_t)5U); - rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, (uint64_t)5U); -} - -void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text) -{ - Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec128 *acc = ctx; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - uint64_t u0 = load64_le(text); - uint64_t lo = u0; - uint64_t u = load64_le(text + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo); - Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi); - Lib_IntVector_Intrinsics_vec128 - f010 = - Lib_IntVector_Intrinsics_vec128_and(f0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f110 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f01 = f010; - Lib_IntVector_Intrinsics_vec128 f111 = f110; - Lib_IntVector_Intrinsics_vec128 f2 = f20; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f41 = f40; - uint64_t b; - Lib_IntVector_Intrinsics_vec128 mask; - Lib_IntVector_Intrinsics_vec128 f4; - Lib_IntVector_Intrinsics_vec128 *r; - Lib_IntVector_Intrinsics_vec128 *r5; - Lib_IntVector_Intrinsics_vec128 r0; - Lib_IntVector_Intrinsics_vec128 r1; - Lib_IntVector_Intrinsics_vec128 r2; - Lib_IntVector_Intrinsics_vec128 r3; - Lib_IntVector_Intrinsics_vec128 r4; - Lib_IntVector_Intrinsics_vec128 r51; - Lib_IntVector_Intrinsics_vec128 r52; - Lib_IntVector_Intrinsics_vec128 r53; - Lib_IntVector_Intrinsics_vec128 r54; - Lib_IntVector_Intrinsics_vec128 f10; - Lib_IntVector_Intrinsics_vec128 f11; - 
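/* poly1305_update1 absorbs one 16-byte block: the block is loaded
   little-endian, split into five 26-bit limbs, and the 2^128 marker bit is
   OR'ed into limb 4 (0x1000000 is bit 24 of that limb, i.e. bit 128 of the
   padded block). The accumulator then advances by
     acc = (acc + block) * r   (mod 2^130 - 5);
   only lane 0 of each vector carries meaningful data in this one-block
   path. */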
Lib_IntVector_Intrinsics_vec128 f12; - Lib_IntVector_Intrinsics_vec128 f13; - Lib_IntVector_Intrinsics_vec128 f14; - Lib_IntVector_Intrinsics_vec128 a0; - Lib_IntVector_Intrinsics_vec128 a1; - Lib_IntVector_Intrinsics_vec128 a2; - Lib_IntVector_Intrinsics_vec128 a3; - Lib_IntVector_Intrinsics_vec128 a4; - Lib_IntVector_Intrinsics_vec128 a01; - Lib_IntVector_Intrinsics_vec128 a11; - Lib_IntVector_Intrinsics_vec128 a21; - Lib_IntVector_Intrinsics_vec128 a31; - Lib_IntVector_Intrinsics_vec128 a41; - Lib_IntVector_Intrinsics_vec128 a02; - Lib_IntVector_Intrinsics_vec128 a12; - Lib_IntVector_Intrinsics_vec128 a22; - Lib_IntVector_Intrinsics_vec128 a32; - Lib_IntVector_Intrinsics_vec128 a42; - Lib_IntVector_Intrinsics_vec128 a03; - Lib_IntVector_Intrinsics_vec128 a13; - Lib_IntVector_Intrinsics_vec128 a23; - Lib_IntVector_Intrinsics_vec128 a33; - Lib_IntVector_Intrinsics_vec128 a43; - Lib_IntVector_Intrinsics_vec128 a04; - Lib_IntVector_Intrinsics_vec128 a14; - Lib_IntVector_Intrinsics_vec128 a24; - Lib_IntVector_Intrinsics_vec128 a34; - Lib_IntVector_Intrinsics_vec128 a44; - Lib_IntVector_Intrinsics_vec128 a05; - Lib_IntVector_Intrinsics_vec128 a15; - Lib_IntVector_Intrinsics_vec128 a25; - Lib_IntVector_Intrinsics_vec128 a35; - Lib_IntVector_Intrinsics_vec128 a45; - Lib_IntVector_Intrinsics_vec128 a06; - Lib_IntVector_Intrinsics_vec128 a16; - Lib_IntVector_Intrinsics_vec128 a26; - Lib_IntVector_Intrinsics_vec128 a36; - Lib_IntVector_Intrinsics_vec128 a46; - Lib_IntVector_Intrinsics_vec128 t0; - Lib_IntVector_Intrinsics_vec128 t1; - Lib_IntVector_Intrinsics_vec128 t2; - Lib_IntVector_Intrinsics_vec128 t3; - Lib_IntVector_Intrinsics_vec128 t4; - Lib_IntVector_Intrinsics_vec128 mask26; - Lib_IntVector_Intrinsics_vec128 z0; - Lib_IntVector_Intrinsics_vec128 z1; - Lib_IntVector_Intrinsics_vec128 x0; - Lib_IntVector_Intrinsics_vec128 x3; - Lib_IntVector_Intrinsics_vec128 x1; - Lib_IntVector_Intrinsics_vec128 x4; - Lib_IntVector_Intrinsics_vec128 z01; - Lib_IntVector_Intrinsics_vec128 z11; - Lib_IntVector_Intrinsics_vec128 t; - Lib_IntVector_Intrinsics_vec128 z12; - Lib_IntVector_Intrinsics_vec128 x11; - Lib_IntVector_Intrinsics_vec128 x41; - Lib_IntVector_Intrinsics_vec128 x2; - Lib_IntVector_Intrinsics_vec128 x01; - Lib_IntVector_Intrinsics_vec128 z02; - Lib_IntVector_Intrinsics_vec128 z13; - Lib_IntVector_Intrinsics_vec128 x21; - Lib_IntVector_Intrinsics_vec128 x02; - Lib_IntVector_Intrinsics_vec128 x31; - Lib_IntVector_Intrinsics_vec128 x12; - Lib_IntVector_Intrinsics_vec128 z03; - Lib_IntVector_Intrinsics_vec128 x32; - Lib_IntVector_Intrinsics_vec128 x42; - Lib_IntVector_Intrinsics_vec128 o0; - Lib_IntVector_Intrinsics_vec128 o1; - Lib_IntVector_Intrinsics_vec128 o2; - Lib_IntVector_Intrinsics_vec128 o3; - Lib_IntVector_Intrinsics_vec128 o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec128_load64(b); - f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask); - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, 
f13); - a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec128_mul64(r1, a01); - a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a11)); - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a21)); - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a31)); - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a31)); - a06 = - Lib_IntVector_Intrinsics_vec128_add64(a05, - Lib_IntVector_Intrinsics_vec128_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec128_add64(a15, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec128_add64(a25, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a41)); - a36 = - Lib_IntVector_Intrinsics_vec128_add64(a35, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec128_add64(a45, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a41)); - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - z13 = 
Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; -} - -void -Hacl_Poly1305_128_poly1305_update( - Lib_IntVector_Intrinsics_vec128 *ctx, - uint32_t len, - uint8_t *text -) -{ - Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec128 *acc = ctx; - uint32_t sz_block = (uint32_t)32U; - uint32_t len0 = len / sz_block * sz_block; - uint8_t *t0 = text; - uint32_t len1; - uint8_t *t10; - uint32_t nb0; - uint32_t rem; - if (len0 > (uint32_t)0U) - { - uint32_t bs = (uint32_t)32U; - uint8_t *text0 = t0; - Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc, text0); - { - uint32_t len10 = len0 - bs; - uint8_t *text1 = t0 + bs; - uint32_t nb = len10 / bs; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *block = text1 + i * bs; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block); - Lib_IntVector_Intrinsics_vec128 - b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U); - Lib_IntVector_Intrinsics_vec128 - lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2); - Lib_IntVector_Intrinsics_vec128 - hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2); - Lib_IntVector_Intrinsics_vec128 - f00 = - Lib_IntVector_Intrinsics_vec128_and(lo, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f15 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f25 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f0 = f00; - Lib_IntVector_Intrinsics_vec128 f1 = f15; - Lib_IntVector_Intrinsics_vec128 f2 = f25; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f41 = f40; - e[0U] = f0; - e[1U] = f1; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b); - Lib_IntVector_Intrinsics_vec128 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U; - Lib_IntVector_Intrinsics_vec128 *rn5 = pre + (uint32_t)15U; - Lib_IntVector_Intrinsics_vec128 r0 = rn[0U]; - 
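/* Two-blocks-at-a-time batch: acc holds two independent Poly1305
   accumulators, one per 64-bit lane. rn = pre + 10 holds the limbs of r^2
   (computed once in poly1305_init) and rn5 the same limbs premultiplied by
   5, so each iteration advances both lanes by h = h * r^2 before the next
   pair of encoded blocks is added in. The lanes are recombined by
   fmul_r2_normalize once the batched loop finishes. */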
Lib_IntVector_Intrinsics_vec128 r1 = rn[1U]; - Lib_IntVector_Intrinsics_vec128 r2 = rn[2U]; - Lib_IntVector_Intrinsics_vec128 r3 = rn[3U]; - Lib_IntVector_Intrinsics_vec128 r4 = rn[4U]; - Lib_IntVector_Intrinsics_vec128 r51 = rn5[1U]; - Lib_IntVector_Intrinsics_vec128 r52 = rn5[2U]; - Lib_IntVector_Intrinsics_vec128 r53 = rn5[3U]; - Lib_IntVector_Intrinsics_vec128 r54 = rn5[4U]; - Lib_IntVector_Intrinsics_vec128 f10 = acc[0U]; - Lib_IntVector_Intrinsics_vec128 f110 = acc[1U]; - Lib_IntVector_Intrinsics_vec128 f120 = acc[2U]; - Lib_IntVector_Intrinsics_vec128 f130 = acc[3U]; - Lib_IntVector_Intrinsics_vec128 f140 = acc[4U]; - Lib_IntVector_Intrinsics_vec128 a0 = Lib_IntVector_Intrinsics_vec128_mul64(r0, f10); - Lib_IntVector_Intrinsics_vec128 a1 = Lib_IntVector_Intrinsics_vec128_mul64(r1, f10); - Lib_IntVector_Intrinsics_vec128 a2 = Lib_IntVector_Intrinsics_vec128_mul64(r2, f10); - Lib_IntVector_Intrinsics_vec128 a3 = Lib_IntVector_Intrinsics_vec128_mul64(r3, f10); - Lib_IntVector_Intrinsics_vec128 a4 = Lib_IntVector_Intrinsics_vec128_mul64(r4, f10); - Lib_IntVector_Intrinsics_vec128 - a01 = - Lib_IntVector_Intrinsics_vec128_add64(a0, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f110)); - Lib_IntVector_Intrinsics_vec128 - a11 = - Lib_IntVector_Intrinsics_vec128_add64(a1, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f110)); - Lib_IntVector_Intrinsics_vec128 - a21 = - Lib_IntVector_Intrinsics_vec128_add64(a2, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f110)); - Lib_IntVector_Intrinsics_vec128 - a31 = - Lib_IntVector_Intrinsics_vec128_add64(a3, - Lib_IntVector_Intrinsics_vec128_mul64(r2, f110)); - Lib_IntVector_Intrinsics_vec128 - a41 = - Lib_IntVector_Intrinsics_vec128_add64(a4, - Lib_IntVector_Intrinsics_vec128_mul64(r3, f110)); - Lib_IntVector_Intrinsics_vec128 - a02 = - Lib_IntVector_Intrinsics_vec128_add64(a01, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f120)); - Lib_IntVector_Intrinsics_vec128 - a12 = - Lib_IntVector_Intrinsics_vec128_add64(a11, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f120)); - Lib_IntVector_Intrinsics_vec128 - a22 = - Lib_IntVector_Intrinsics_vec128_add64(a21, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f120)); - Lib_IntVector_Intrinsics_vec128 - a32 = - Lib_IntVector_Intrinsics_vec128_add64(a31, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f120)); - Lib_IntVector_Intrinsics_vec128 - a42 = - Lib_IntVector_Intrinsics_vec128_add64(a41, - Lib_IntVector_Intrinsics_vec128_mul64(r2, f120)); - Lib_IntVector_Intrinsics_vec128 - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r52, f130)); - Lib_IntVector_Intrinsics_vec128 - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r53, f130)); - Lib_IntVector_Intrinsics_vec128 - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f130)); - Lib_IntVector_Intrinsics_vec128 - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f130)); - Lib_IntVector_Intrinsics_vec128 - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r1, f130)); - Lib_IntVector_Intrinsics_vec128 - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r51, f140)); - Lib_IntVector_Intrinsics_vec128 - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r52, f140)); - Lib_IntVector_Intrinsics_vec128 - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - 
Lib_IntVector_Intrinsics_vec128_mul64(r53, f140)); - Lib_IntVector_Intrinsics_vec128 - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r54, f140)); - Lib_IntVector_Intrinsics_vec128 - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r0, f140)); - Lib_IntVector_Intrinsics_vec128 t01 = a04; - Lib_IntVector_Intrinsics_vec128 t1 = a14; - Lib_IntVector_Intrinsics_vec128 t2 = a24; - Lib_IntVector_Intrinsics_vec128 t3 = a34; - Lib_IntVector_Intrinsics_vec128 t4 = a44; - Lib_IntVector_Intrinsics_vec128 - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec128 - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26); - Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0); - Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - Lib_IntVector_Intrinsics_vec128 - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - Lib_IntVector_Intrinsics_vec128 - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - Lib_IntVector_Intrinsics_vec128 - x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - Lib_IntVector_Intrinsics_vec128 - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - Lib_IntVector_Intrinsics_vec128 o00 = x02; - Lib_IntVector_Intrinsics_vec128 o10 = x12; - Lib_IntVector_Intrinsics_vec128 o20 = x21; - Lib_IntVector_Intrinsics_vec128 o30 = x32; - Lib_IntVector_Intrinsics_vec128 o40 = x42; - acc[0U] = o00; - acc[1U] = o10; - acc[2U] = o20; - acc[3U] = o30; - acc[4U] = o40; - { - Lib_IntVector_Intrinsics_vec128 f100 = acc[0U]; - Lib_IntVector_Intrinsics_vec128 f11 = acc[1U]; - Lib_IntVector_Intrinsics_vec128 f12 = acc[2U]; - Lib_IntVector_Intrinsics_vec128 f13 = acc[3U]; - Lib_IntVector_Intrinsics_vec128 f14 = acc[4U]; - Lib_IntVector_Intrinsics_vec128 f20 = e[0U]; - Lib_IntVector_Intrinsics_vec128 f21 = e[1U]; - Lib_IntVector_Intrinsics_vec128 f22 = e[2U]; - 
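/* Addition half of the Horner step: the freshly reduced lanes now absorb
   the next pair of encoded blocks from e. Deferring this add until after
   the carry pass keeps every limb small enough that the following
   64x64-bit partial products cannot overflow a 64-bit lane. */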
Lib_IntVector_Intrinsics_vec128 f23 = e[3U]; - Lib_IntVector_Intrinsics_vec128 f24 = e[4U]; - Lib_IntVector_Intrinsics_vec128 - o0 = Lib_IntVector_Intrinsics_vec128_add64(f100, f20); - Lib_IntVector_Intrinsics_vec128 - o1 = Lib_IntVector_Intrinsics_vec128_add64(f11, f21); - Lib_IntVector_Intrinsics_vec128 - o2 = Lib_IntVector_Intrinsics_vec128_add64(f12, f22); - Lib_IntVector_Intrinsics_vec128 - o3 = Lib_IntVector_Intrinsics_vec128_add64(f13, f23); - Lib_IntVector_Intrinsics_vec128 - o4 = Lib_IntVector_Intrinsics_vec128_add64(f14, f24); - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - } - } - } - } - } - Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc, pre); - } - } - len1 = len - len0; - t10 = text + len0; - nb0 = len1 / (uint32_t)16U; - rem = len1 % (uint32_t)16U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb0; i++) - { - uint8_t *block = t10 + i * (uint32_t)16U; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo); - Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi); - Lib_IntVector_Intrinsics_vec128 - f010 = - Lib_IntVector_Intrinsics_vec128_and(f0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f110 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec128 - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec128 f01 = f010; - Lib_IntVector_Intrinsics_vec128 f111 = f110; - Lib_IntVector_Intrinsics_vec128 f2 = f20; - Lib_IntVector_Intrinsics_vec128 f3 = f30; - Lib_IntVector_Intrinsics_vec128 f41 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b); - Lib_IntVector_Intrinsics_vec128 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec128 *r = pre; - Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec128 r0 = r[0U]; - Lib_IntVector_Intrinsics_vec128 r1 = r[1U]; - Lib_IntVector_Intrinsics_vec128 r2 = r[2U]; - Lib_IntVector_Intrinsics_vec128 r3 = r[3U]; - Lib_IntVector_Intrinsics_vec128 r4 = r[4U]; - Lib_IntVector_Intrinsics_vec128 r51 = r5[1U]; - Lib_IntVector_Intrinsics_vec128 r52 = r5[2U]; - Lib_IntVector_Intrinsics_vec128 r53 = r5[3U]; - Lib_IntVector_Intrinsics_vec128 r54 = r5[4U]; - Lib_IntVector_Intrinsics_vec128 f10 = e[0U]; - Lib_IntVector_Intrinsics_vec128 f11 = e[1U]; - Lib_IntVector_Intrinsics_vec128 f12 = e[2U]; - Lib_IntVector_Intrinsics_vec128 f13 = e[3U]; - 
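/* Serial tail: the len1 = len - len0 bytes left over from the 32-byte
   batches are absorbed 16 bytes at a time with the one-block update,
   using r itself (r = pre, r5 = pre + 5) rather than r^2. */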
Lib_IntVector_Intrinsics_vec128 f14 = e[4U]; - Lib_IntVector_Intrinsics_vec128 a0 = acc[0U]; - Lib_IntVector_Intrinsics_vec128 a1 = acc[1U]; - Lib_IntVector_Intrinsics_vec128 a2 = acc[2U]; - Lib_IntVector_Intrinsics_vec128 a3 = acc[3U]; - Lib_IntVector_Intrinsics_vec128 a4 = acc[4U]; - Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10); - Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11); - Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12); - Lib_IntVector_Intrinsics_vec128 a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13); - Lib_IntVector_Intrinsics_vec128 a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14); - Lib_IntVector_Intrinsics_vec128 a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01); - Lib_IntVector_Intrinsics_vec128 a12 = Lib_IntVector_Intrinsics_vec128_mul64(r1, a01); - Lib_IntVector_Intrinsics_vec128 a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01); - Lib_IntVector_Intrinsics_vec128 a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01); - Lib_IntVector_Intrinsics_vec128 a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01); - Lib_IntVector_Intrinsics_vec128 - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a11)); - Lib_IntVector_Intrinsics_vec128 - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a11)); - Lib_IntVector_Intrinsics_vec128 - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a11)); - Lib_IntVector_Intrinsics_vec128 - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a11)); - Lib_IntVector_Intrinsics_vec128 - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r3, a11)); - Lib_IntVector_Intrinsics_vec128 - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a21)); - Lib_IntVector_Intrinsics_vec128 - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a21)); - Lib_IntVector_Intrinsics_vec128 - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a21)); - Lib_IntVector_Intrinsics_vec128 - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a21)); - Lib_IntVector_Intrinsics_vec128 - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a21)); - Lib_IntVector_Intrinsics_vec128 - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a31)); - Lib_IntVector_Intrinsics_vec128 - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a31)); - Lib_IntVector_Intrinsics_vec128 - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a31)); - Lib_IntVector_Intrinsics_vec128 - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a31)); - Lib_IntVector_Intrinsics_vec128 - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a31)); - Lib_IntVector_Intrinsics_vec128 - a06 = - Lib_IntVector_Intrinsics_vec128_add64(a05, - Lib_IntVector_Intrinsics_vec128_mul64(r51, a41)); - Lib_IntVector_Intrinsics_vec128 - a16 = - Lib_IntVector_Intrinsics_vec128_add64(a15, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a41)); - Lib_IntVector_Intrinsics_vec128 - 
a26 = - Lib_IntVector_Intrinsics_vec128_add64(a25, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a41)); - Lib_IntVector_Intrinsics_vec128 - a36 = - Lib_IntVector_Intrinsics_vec128_add64(a35, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a41)); - Lib_IntVector_Intrinsics_vec128 - a46 = - Lib_IntVector_Intrinsics_vec128_add64(a45, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a41)); - Lib_IntVector_Intrinsics_vec128 t01 = a06; - Lib_IntVector_Intrinsics_vec128 t11 = a16; - Lib_IntVector_Intrinsics_vec128 t2 = a26; - Lib_IntVector_Intrinsics_vec128 t3 = a36; - Lib_IntVector_Intrinsics_vec128 t4 = a46; - Lib_IntVector_Intrinsics_vec128 - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec128 - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26); - Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0); - Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - Lib_IntVector_Intrinsics_vec128 - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - Lib_IntVector_Intrinsics_vec128 - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - Lib_IntVector_Intrinsics_vec128 - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - Lib_IntVector_Intrinsics_vec128 o0 = x02; - Lib_IntVector_Intrinsics_vec128 o1 = x12; - Lib_IntVector_Intrinsics_vec128 o2 = x21; - Lib_IntVector_Intrinsics_vec128 o3 = x32; - Lib_IntVector_Intrinsics_vec128 o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - } - } - } - } - if (rem > (uint32_t)0U) - { - uint8_t *last = t10 + nb0 * (uint32_t)16U; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U }; - uint8_t tmp[16U] = { 0U }; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - Lib_IntVector_Intrinsics_vec128 f0; - Lib_IntVector_Intrinsics_vec128 f1; - 
Lib_IntVector_Intrinsics_vec128 f010; - Lib_IntVector_Intrinsics_vec128 f110; - Lib_IntVector_Intrinsics_vec128 f20; - Lib_IntVector_Intrinsics_vec128 f30; - Lib_IntVector_Intrinsics_vec128 f40; - Lib_IntVector_Intrinsics_vec128 f01; - Lib_IntVector_Intrinsics_vec128 f111; - Lib_IntVector_Intrinsics_vec128 f2; - Lib_IntVector_Intrinsics_vec128 f3; - Lib_IntVector_Intrinsics_vec128 f4; - uint64_t b; - Lib_IntVector_Intrinsics_vec128 mask; - Lib_IntVector_Intrinsics_vec128 fi; - Lib_IntVector_Intrinsics_vec128 *r; - Lib_IntVector_Intrinsics_vec128 *r5; - Lib_IntVector_Intrinsics_vec128 r0; - Lib_IntVector_Intrinsics_vec128 r1; - Lib_IntVector_Intrinsics_vec128 r2; - Lib_IntVector_Intrinsics_vec128 r3; - Lib_IntVector_Intrinsics_vec128 r4; - Lib_IntVector_Intrinsics_vec128 r51; - Lib_IntVector_Intrinsics_vec128 r52; - Lib_IntVector_Intrinsics_vec128 r53; - Lib_IntVector_Intrinsics_vec128 r54; - Lib_IntVector_Intrinsics_vec128 f10; - Lib_IntVector_Intrinsics_vec128 f11; - Lib_IntVector_Intrinsics_vec128 f12; - Lib_IntVector_Intrinsics_vec128 f13; - Lib_IntVector_Intrinsics_vec128 f14; - Lib_IntVector_Intrinsics_vec128 a0; - Lib_IntVector_Intrinsics_vec128 a1; - Lib_IntVector_Intrinsics_vec128 a2; - Lib_IntVector_Intrinsics_vec128 a3; - Lib_IntVector_Intrinsics_vec128 a4; - Lib_IntVector_Intrinsics_vec128 a01; - Lib_IntVector_Intrinsics_vec128 a11; - Lib_IntVector_Intrinsics_vec128 a21; - Lib_IntVector_Intrinsics_vec128 a31; - Lib_IntVector_Intrinsics_vec128 a41; - Lib_IntVector_Intrinsics_vec128 a02; - Lib_IntVector_Intrinsics_vec128 a12; - Lib_IntVector_Intrinsics_vec128 a22; - Lib_IntVector_Intrinsics_vec128 a32; - Lib_IntVector_Intrinsics_vec128 a42; - Lib_IntVector_Intrinsics_vec128 a03; - Lib_IntVector_Intrinsics_vec128 a13; - Lib_IntVector_Intrinsics_vec128 a23; - Lib_IntVector_Intrinsics_vec128 a33; - Lib_IntVector_Intrinsics_vec128 a43; - Lib_IntVector_Intrinsics_vec128 a04; - Lib_IntVector_Intrinsics_vec128 a14; - Lib_IntVector_Intrinsics_vec128 a24; - Lib_IntVector_Intrinsics_vec128 a34; - Lib_IntVector_Intrinsics_vec128 a44; - Lib_IntVector_Intrinsics_vec128 a05; - Lib_IntVector_Intrinsics_vec128 a15; - Lib_IntVector_Intrinsics_vec128 a25; - Lib_IntVector_Intrinsics_vec128 a35; - Lib_IntVector_Intrinsics_vec128 a45; - Lib_IntVector_Intrinsics_vec128 a06; - Lib_IntVector_Intrinsics_vec128 a16; - Lib_IntVector_Intrinsics_vec128 a26; - Lib_IntVector_Intrinsics_vec128 a36; - Lib_IntVector_Intrinsics_vec128 a46; - Lib_IntVector_Intrinsics_vec128 t01; - Lib_IntVector_Intrinsics_vec128 t11; - Lib_IntVector_Intrinsics_vec128 t2; - Lib_IntVector_Intrinsics_vec128 t3; - Lib_IntVector_Intrinsics_vec128 t4; - Lib_IntVector_Intrinsics_vec128 mask26; - Lib_IntVector_Intrinsics_vec128 z0; - Lib_IntVector_Intrinsics_vec128 z1; - Lib_IntVector_Intrinsics_vec128 x0; - Lib_IntVector_Intrinsics_vec128 x3; - Lib_IntVector_Intrinsics_vec128 x1; - Lib_IntVector_Intrinsics_vec128 x4; - Lib_IntVector_Intrinsics_vec128 z01; - Lib_IntVector_Intrinsics_vec128 z11; - Lib_IntVector_Intrinsics_vec128 t; - Lib_IntVector_Intrinsics_vec128 z12; - Lib_IntVector_Intrinsics_vec128 x11; - Lib_IntVector_Intrinsics_vec128 x41; - Lib_IntVector_Intrinsics_vec128 x2; - Lib_IntVector_Intrinsics_vec128 x01; - Lib_IntVector_Intrinsics_vec128 z02; - Lib_IntVector_Intrinsics_vec128 z13; - Lib_IntVector_Intrinsics_vec128 x21; - Lib_IntVector_Intrinsics_vec128 x02; - Lib_IntVector_Intrinsics_vec128 x31; - Lib_IntVector_Intrinsics_vec128 x12; - Lib_IntVector_Intrinsics_vec128 z03; - Lib_IntVector_Intrinsics_vec128 x32; - 
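/* Trailing partial block (rem = len1 % 16 bytes): the remaining bytes are
   copied into a zeroed 16-byte buffer, and instead of the fixed 2^128
   marker the padding bit 2^(8*rem) is OR'ed into limb (8*rem)/26 at bit
   position (8*rem)%26, encoding the shorter block length. */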
Lib_IntVector_Intrinsics_vec128 x42; - Lib_IntVector_Intrinsics_vec128 o0; - Lib_IntVector_Intrinsics_vec128 o1; - Lib_IntVector_Intrinsics_vec128 o2; - Lib_IntVector_Intrinsics_vec128 o3; - Lib_IntVector_Intrinsics_vec128 o4; - memcpy(tmp, last, rem * sizeof (uint8_t)); - u0 = load64_le(tmp); - lo = u0; - u = load64_le(tmp + (uint32_t)8U); - hi = u; - f0 = Lib_IntVector_Intrinsics_vec128_load64(lo); - f1 = Lib_IntVector_Intrinsics_vec128_load64(hi); - f010 = - Lib_IntVector_Intrinsics_vec128_and(f0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - f110 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - f20 = - Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - f30 = - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U); - f01 = f010; - f111 = f110; - f2 = f20; - f3 = f30; - f4 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f4; - b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U; - mask = Lib_IntVector_Intrinsics_vec128_load64(b); - fi = e[rem * (uint32_t)8U / (uint32_t)26U]; - e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask); - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13); - a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec128_mul64(r1, a01); - a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec128_add64(a02, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec128_add64(a12, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec128_add64(a22, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a11)); - a33 = - Lib_IntVector_Intrinsics_vec128_add64(a32, - Lib_IntVector_Intrinsics_vec128_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec128_add64(a42, - Lib_IntVector_Intrinsics_vec128_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec128_add64(a03, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec128_add64(a13, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec128_add64(a23, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec128_add64(a33, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a21)); - a44 = - Lib_IntVector_Intrinsics_vec128_add64(a43, - 
Lib_IntVector_Intrinsics_vec128_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec128_add64(a04, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec128_add64(a14, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec128_add64(a24, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec128_add64(a34, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a31)); - a45 = - Lib_IntVector_Intrinsics_vec128_add64(a44, - Lib_IntVector_Intrinsics_vec128_mul64(r1, a31)); - a06 = - Lib_IntVector_Intrinsics_vec128_add64(a05, - Lib_IntVector_Intrinsics_vec128_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec128_add64(a15, - Lib_IntVector_Intrinsics_vec128_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec128_add64(a25, - Lib_IntVector_Intrinsics_vec128_mul64(r53, a41)); - a36 = - Lib_IntVector_Intrinsics_vec128_add64(a35, - Lib_IntVector_Intrinsics_vec128_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec128_add64(a45, - Lib_IntVector_Intrinsics_vec128_mul64(r0, a41)); - t01 = a06; - t11 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26); - x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0); - x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - return; - } -} - -void -Hacl_Poly1305_128_poly1305_finish( - uint8_t *tag, - uint8_t *key, - Lib_IntVector_Intrinsics_vec128 *ctx -) -{ - Lib_IntVector_Intrinsics_vec128 *acc = ctx; - uint8_t *ks = key + (uint32_t)16U; - Lib_IntVector_Intrinsics_vec128 f00 = acc[0U]; - Lib_IntVector_Intrinsics_vec128 f13 = acc[1U]; - Lib_IntVector_Intrinsics_vec128 f23 = acc[2U]; - Lib_IntVector_Intrinsics_vec128 f33 = acc[3U]; - Lib_IntVector_Intrinsics_vec128 f40 = acc[4U]; - Lib_IntVector_Intrinsics_vec128 - l0 = Lib_IntVector_Intrinsics_vec128_add64(f00, Lib_IntVector_Intrinsics_vec128_zero); - Lib_IntVector_Intrinsics_vec128 - tmp00 = - Lib_IntVector_Intrinsics_vec128_and(l0, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - 
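/* poly1305_finish: two carry passes reduce every limb back to 26 bits,
   after which the accumulator is "frozen" by a branch-free conditional
   subtraction of p = 2^130 - 5: the eq64/gt64 mask below is all-ones
   exactly when limbs 1..4 equal 0x3ffffff and limb 0 is at least
   0x3fffffb, i.e. when h >= p. */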
Lib_IntVector_Intrinsics_vec128 - c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f13, c00); - Lib_IntVector_Intrinsics_vec128 - tmp10 = - Lib_IntVector_Intrinsics_vec128_and(l1, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f23, c10); - Lib_IntVector_Intrinsics_vec128 - tmp20 = - Lib_IntVector_Intrinsics_vec128_and(l2, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f33, c20); - Lib_IntVector_Intrinsics_vec128 - tmp30 = - Lib_IntVector_Intrinsics_vec128_and(l3, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(f40, c30); - Lib_IntVector_Intrinsics_vec128 - tmp40 = - Lib_IntVector_Intrinsics_vec128_and(l4, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - f010 = - Lib_IntVector_Intrinsics_vec128_add64(tmp00, - Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U)); - Lib_IntVector_Intrinsics_vec128 f110 = tmp10; - Lib_IntVector_Intrinsics_vec128 f210 = tmp20; - Lib_IntVector_Intrinsics_vec128 f310 = tmp30; - Lib_IntVector_Intrinsics_vec128 f410 = tmp40; - Lib_IntVector_Intrinsics_vec128 - l = Lib_IntVector_Intrinsics_vec128_add64(f010, Lib_IntVector_Intrinsics_vec128_zero); - Lib_IntVector_Intrinsics_vec128 - tmp0 = - Lib_IntVector_Intrinsics_vec128_and(l, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l5 = Lib_IntVector_Intrinsics_vec128_add64(f110, c0); - Lib_IntVector_Intrinsics_vec128 - tmp1 = - Lib_IntVector_Intrinsics_vec128_and(l5, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(f210, c1); - Lib_IntVector_Intrinsics_vec128 - tmp2 = - Lib_IntVector_Intrinsics_vec128_and(l6, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(f310, c2); - Lib_IntVector_Intrinsics_vec128 - tmp3 = - Lib_IntVector_Intrinsics_vec128_and(l7, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(f410, c3); - Lib_IntVector_Intrinsics_vec128 - tmp4 = - Lib_IntVector_Intrinsics_vec128_and(l8, - Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec128 - c4 = 
Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec128 - f02 = - Lib_IntVector_Intrinsics_vec128_add64(tmp0, - Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U)); - Lib_IntVector_Intrinsics_vec128 f12 = tmp1; - Lib_IntVector_Intrinsics_vec128 f22 = tmp2; - Lib_IntVector_Intrinsics_vec128 f32 = tmp3; - Lib_IntVector_Intrinsics_vec128 f42 = tmp4; - Lib_IntVector_Intrinsics_vec128 - mh = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec128 - ml = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffffbU); - Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f42, mh); - Lib_IntVector_Intrinsics_vec128 - mask1 = - Lib_IntVector_Intrinsics_vec128_and(mask, - Lib_IntVector_Intrinsics_vec128_eq64(f32, mh)); - Lib_IntVector_Intrinsics_vec128 - mask2 = - Lib_IntVector_Intrinsics_vec128_and(mask1, - Lib_IntVector_Intrinsics_vec128_eq64(f22, mh)); - Lib_IntVector_Intrinsics_vec128 - mask3 = - Lib_IntVector_Intrinsics_vec128_and(mask2, - Lib_IntVector_Intrinsics_vec128_eq64(f12, mh)); - Lib_IntVector_Intrinsics_vec128 - mask4 = - Lib_IntVector_Intrinsics_vec128_and(mask3, - Lib_IntVector_Intrinsics_vec128_lognot(Lib_IntVector_Intrinsics_vec128_gt64(ml, f02))); - Lib_IntVector_Intrinsics_vec128 ph = Lib_IntVector_Intrinsics_vec128_and(mask4, mh); - Lib_IntVector_Intrinsics_vec128 pl = Lib_IntVector_Intrinsics_vec128_and(mask4, ml); - Lib_IntVector_Intrinsics_vec128 o0 = Lib_IntVector_Intrinsics_vec128_sub64(f02, pl); - Lib_IntVector_Intrinsics_vec128 o1 = Lib_IntVector_Intrinsics_vec128_sub64(f12, ph); - Lib_IntVector_Intrinsics_vec128 o2 = Lib_IntVector_Intrinsics_vec128_sub64(f22, ph); - Lib_IntVector_Intrinsics_vec128 o3 = Lib_IntVector_Intrinsics_vec128_sub64(f32, ph); - Lib_IntVector_Intrinsics_vec128 o4 = Lib_IntVector_Intrinsics_vec128_sub64(f42, ph); - Lib_IntVector_Intrinsics_vec128 f011 = o0; - Lib_IntVector_Intrinsics_vec128 f111 = o1; - Lib_IntVector_Intrinsics_vec128 f211 = o2; - Lib_IntVector_Intrinsics_vec128 f311 = o3; - Lib_IntVector_Intrinsics_vec128 f411 = o4; - Lib_IntVector_Intrinsics_vec128 f0; - Lib_IntVector_Intrinsics_vec128 f1; - Lib_IntVector_Intrinsics_vec128 f2; - Lib_IntVector_Intrinsics_vec128 f3; - Lib_IntVector_Intrinsics_vec128 f4; - uint64_t f01; - uint64_t f112; - uint64_t f212; - uint64_t f312; - uint64_t f41; - uint64_t lo0; - uint64_t hi0; - uint64_t f10; - uint64_t f11; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - uint64_t f20; - uint64_t f21; - uint64_t r0; - uint64_t r1; - uint64_t c; - uint64_t r11; - uint64_t f30; - uint64_t f31; - acc[0U] = f011; - acc[1U] = f111; - acc[2U] = f211; - acc[3U] = f311; - acc[4U] = f411; - f0 = acc[0U]; - f1 = acc[1U]; - f2 = acc[2U]; - f3 = acc[3U]; - f4 = acc[4U]; - f01 = Lib_IntVector_Intrinsics_vec128_extract64(f0, (uint32_t)0U); - f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, (uint32_t)0U); - f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, (uint32_t)0U); - f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, (uint32_t)0U); - f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, (uint32_t)0U); - lo0 = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U; - hi0 = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U; - f10 = lo0; - f11 = hi0; - u0 = load64_le(ks); - lo = u0; - u = load64_le(ks + (uint32_t)8U); - hi = u; - f20 = lo; - f21 = hi; - r0 = f10 + f20; - r1 = f11 + f21; - c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U; - r11 = r1 + c; - f30 
= r0; - f31 = r11; - store64_le(tag, f30); - store64_le(tag + (uint32_t)8U, f31); -} - -void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key) -{ - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Poly1305_128_poly1305_init(ctx, key); - Hacl_Poly1305_128_poly1305_update(ctx, len, text); - Hacl_Poly1305_128_poly1305_finish(tag, key, ctx); -} - diff --git a/dist/c89-compatible/Hacl_Poly1305_128.h b/dist/c89-compatible/Hacl_Poly1305_128.h deleted file mode 100644 index 4f738cc41f..0000000000 --- a/dist/c89-compatible/Hacl_Poly1305_128.h +++ /dev/null @@ -1,68 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_Poly1305_128_H -#define __Hacl_Poly1305_128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -#include "libintvector.h" -typedef Lib_IntVector_Intrinsics_vec128 *Hacl_Poly1305_128_poly1305_ctx; - -void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key); - -void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text); - -void -Hacl_Poly1305_128_poly1305_update( - Lib_IntVector_Intrinsics_vec128 *ctx, - uint32_t len, - uint8_t *text -); - -void -Hacl_Poly1305_128_poly1305_finish( - uint8_t *tag, - uint8_t *key, - Lib_IntVector_Intrinsics_vec128 *ctx -); - -void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Poly1305_128_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Poly1305_256.c b/dist/c89-compatible/Hacl_Poly1305_256.c deleted file mode 100644 index 60f76d1846..0000000000 --- a/dist/c89-compatible/Hacl_Poly1305_256.c +++ /dev/null @@ -1,2421 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - - -#include "internal/Hacl_Poly1305_256.h" - - - -void -Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(b); - Lib_IntVector_Intrinsics_vec256 - hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi); - Lib_IntVector_Intrinsics_vec256 - m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi); - Lib_IntVector_Intrinsics_vec256 - m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U); - Lib_IntVector_Intrinsics_vec256 - m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U); - Lib_IntVector_Intrinsics_vec256 m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1); - Lib_IntVector_Intrinsics_vec256 t0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1); - Lib_IntVector_Intrinsics_vec256 t3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3); - Lib_IntVector_Intrinsics_vec256 - t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)4U); - Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t2, mask26); - Lib_IntVector_Intrinsics_vec256 - t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t1, mask26); - Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26); - Lib_IntVector_Intrinsics_vec256 - t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)30U); - Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask26); - Lib_IntVector_Intrinsics_vec256 - o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 o0 = o5; - Lib_IntVector_Intrinsics_vec256 o1 = o10; - Lib_IntVector_Intrinsics_vec256 o2 = o20; - Lib_IntVector_Intrinsics_vec256 o3 = o30; - Lib_IntVector_Intrinsics_vec256 o4 = o40; - uint64_t b1; - Lib_IntVector_Intrinsics_vec256 mask; - Lib_IntVector_Intrinsics_vec256 f40; - Lib_IntVector_Intrinsics_vec256 acc0; - Lib_IntVector_Intrinsics_vec256 acc1; - Lib_IntVector_Intrinsics_vec256 acc2; - Lib_IntVector_Intrinsics_vec256 acc3; - Lib_IntVector_Intrinsics_vec256 acc4; - Lib_IntVector_Intrinsics_vec256 e0; - Lib_IntVector_Intrinsics_vec256 e1; - Lib_IntVector_Intrinsics_vec256 e2; - Lib_IntVector_Intrinsics_vec256 e3; - Lib_IntVector_Intrinsics_vec256 e4; - Lib_IntVector_Intrinsics_vec256 r0; - Lib_IntVector_Intrinsics_vec256 r1; - Lib_IntVector_Intrinsics_vec256 r2; - Lib_IntVector_Intrinsics_vec256 r3; - Lib_IntVector_Intrinsics_vec256 r4; - Lib_IntVector_Intrinsics_vec256 r01; - Lib_IntVector_Intrinsics_vec256 r11; - Lib_IntVector_Intrinsics_vec256 r21; - Lib_IntVector_Intrinsics_vec256 r31; - Lib_IntVector_Intrinsics_vec256 r41; - Lib_IntVector_Intrinsics_vec256 f0; - Lib_IntVector_Intrinsics_vec256 f1; - Lib_IntVector_Intrinsics_vec256 f2; - Lib_IntVector_Intrinsics_vec256 f3; - Lib_IntVector_Intrinsics_vec256 f4; - Lib_IntVector_Intrinsics_vec256 acc01; - Lib_IntVector_Intrinsics_vec256 acc11; - Lib_IntVector_Intrinsics_vec256 acc21; - Lib_IntVector_Intrinsics_vec256 acc31; - Lib_IntVector_Intrinsics_vec256 acc41; - e[0U] = o0; - e[1U] = o1; - e[2U] = o2; 
- e[3U] = o3; - e[4U] = o4; - b1 = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec256_load64(b1); - f40 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f40, mask); - acc0 = acc[0U]; - acc1 = acc[1U]; - acc2 = acc[2U]; - acc3 = acc[3U]; - acc4 = acc[4U]; - e0 = e[0U]; - e1 = e[1U]; - e2 = e[2U]; - e3 = e[3U]; - e4 = e[4U]; - r0 = Lib_IntVector_Intrinsics_vec256_zero; - r1 = Lib_IntVector_Intrinsics_vec256_zero; - r2 = Lib_IntVector_Intrinsics_vec256_zero; - r3 = Lib_IntVector_Intrinsics_vec256_zero; - r4 = Lib_IntVector_Intrinsics_vec256_zero; - r01 = - Lib_IntVector_Intrinsics_vec256_insert64(r0, - Lib_IntVector_Intrinsics_vec256_extract64(acc0, (uint32_t)0U), - (uint32_t)0U); - r11 = - Lib_IntVector_Intrinsics_vec256_insert64(r1, - Lib_IntVector_Intrinsics_vec256_extract64(acc1, (uint32_t)0U), - (uint32_t)0U); - r21 = - Lib_IntVector_Intrinsics_vec256_insert64(r2, - Lib_IntVector_Intrinsics_vec256_extract64(acc2, (uint32_t)0U), - (uint32_t)0U); - r31 = - Lib_IntVector_Intrinsics_vec256_insert64(r3, - Lib_IntVector_Intrinsics_vec256_extract64(acc3, (uint32_t)0U), - (uint32_t)0U); - r41 = - Lib_IntVector_Intrinsics_vec256_insert64(r4, - Lib_IntVector_Intrinsics_vec256_extract64(acc4, (uint32_t)0U), - (uint32_t)0U); - f0 = Lib_IntVector_Intrinsics_vec256_add64(r01, e0); - f1 = Lib_IntVector_Intrinsics_vec256_add64(r11, e1); - f2 = Lib_IntVector_Intrinsics_vec256_add64(r21, e2); - f3 = Lib_IntVector_Intrinsics_vec256_add64(r31, e3); - f4 = Lib_IntVector_Intrinsics_vec256_add64(r41, e4); - acc01 = f0; - acc11 = f1; - acc21 = f2; - acc31 = f3; - acc41 = f4; - acc[0U] = acc01; - acc[1U] = acc11; - acc[2U] = acc21; - acc[3U] = acc31; - acc[4U] = acc41; -} - -void -Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize( - Lib_IntVector_Intrinsics_vec256 *out, - Lib_IntVector_Intrinsics_vec256 *p -) -{ - Lib_IntVector_Intrinsics_vec256 *r = p; - Lib_IntVector_Intrinsics_vec256 *r_5 = p + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 *r4 = p + (uint32_t)10U; - Lib_IntVector_Intrinsics_vec256 a0 = out[0U]; - Lib_IntVector_Intrinsics_vec256 a1 = out[1U]; - Lib_IntVector_Intrinsics_vec256 a2 = out[2U]; - Lib_IntVector_Intrinsics_vec256 a3 = out[3U]; - Lib_IntVector_Intrinsics_vec256 a4 = out[4U]; - Lib_IntVector_Intrinsics_vec256 r10 = r[0U]; - Lib_IntVector_Intrinsics_vec256 r11 = r[1U]; - Lib_IntVector_Intrinsics_vec256 r12 = r[2U]; - Lib_IntVector_Intrinsics_vec256 r13 = r[3U]; - Lib_IntVector_Intrinsics_vec256 r14 = r[4U]; - Lib_IntVector_Intrinsics_vec256 r151 = r_5[1U]; - Lib_IntVector_Intrinsics_vec256 r152 = r_5[2U]; - Lib_IntVector_Intrinsics_vec256 r153 = r_5[3U]; - Lib_IntVector_Intrinsics_vec256 r154 = r_5[4U]; - Lib_IntVector_Intrinsics_vec256 r40 = r4[0U]; - Lib_IntVector_Intrinsics_vec256 r41 = r4[1U]; - Lib_IntVector_Intrinsics_vec256 r42 = r4[2U]; - Lib_IntVector_Intrinsics_vec256 r43 = r4[3U]; - Lib_IntVector_Intrinsics_vec256 r44 = r4[4U]; - Lib_IntVector_Intrinsics_vec256 a010 = Lib_IntVector_Intrinsics_vec256_mul64(r10, r10); - Lib_IntVector_Intrinsics_vec256 a110 = Lib_IntVector_Intrinsics_vec256_mul64(r11, r10); - Lib_IntVector_Intrinsics_vec256 a210 = Lib_IntVector_Intrinsics_vec256_mul64(r12, r10); - Lib_IntVector_Intrinsics_vec256 a310 = Lib_IntVector_Intrinsics_vec256_mul64(r13, r10); - Lib_IntVector_Intrinsics_vec256 a410 = Lib_IntVector_Intrinsics_vec256_mul64(r14, r10); - Lib_IntVector_Intrinsics_vec256 - a020 = - Lib_IntVector_Intrinsics_vec256_add64(a010, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r11)); - Lib_IntVector_Intrinsics_vec256 - a120 = - 
Lib_IntVector_Intrinsics_vec256_add64(a110, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r11)); - Lib_IntVector_Intrinsics_vec256 - a220 = - Lib_IntVector_Intrinsics_vec256_add64(a210, - Lib_IntVector_Intrinsics_vec256_mul64(r11, r11)); - Lib_IntVector_Intrinsics_vec256 - a320 = - Lib_IntVector_Intrinsics_vec256_add64(a310, - Lib_IntVector_Intrinsics_vec256_mul64(r12, r11)); - Lib_IntVector_Intrinsics_vec256 - a420 = - Lib_IntVector_Intrinsics_vec256_add64(a410, - Lib_IntVector_Intrinsics_vec256_mul64(r13, r11)); - Lib_IntVector_Intrinsics_vec256 - a030 = - Lib_IntVector_Intrinsics_vec256_add64(a020, - Lib_IntVector_Intrinsics_vec256_mul64(r153, r12)); - Lib_IntVector_Intrinsics_vec256 - a130 = - Lib_IntVector_Intrinsics_vec256_add64(a120, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r12)); - Lib_IntVector_Intrinsics_vec256 - a230 = - Lib_IntVector_Intrinsics_vec256_add64(a220, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r12)); - Lib_IntVector_Intrinsics_vec256 - a330 = - Lib_IntVector_Intrinsics_vec256_add64(a320, - Lib_IntVector_Intrinsics_vec256_mul64(r11, r12)); - Lib_IntVector_Intrinsics_vec256 - a430 = - Lib_IntVector_Intrinsics_vec256_add64(a420, - Lib_IntVector_Intrinsics_vec256_mul64(r12, r12)); - Lib_IntVector_Intrinsics_vec256 - a040 = - Lib_IntVector_Intrinsics_vec256_add64(a030, - Lib_IntVector_Intrinsics_vec256_mul64(r152, r13)); - Lib_IntVector_Intrinsics_vec256 - a140 = - Lib_IntVector_Intrinsics_vec256_add64(a130, - Lib_IntVector_Intrinsics_vec256_mul64(r153, r13)); - Lib_IntVector_Intrinsics_vec256 - a240 = - Lib_IntVector_Intrinsics_vec256_add64(a230, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r13)); - Lib_IntVector_Intrinsics_vec256 - a340 = - Lib_IntVector_Intrinsics_vec256_add64(a330, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r13)); - Lib_IntVector_Intrinsics_vec256 - a440 = - Lib_IntVector_Intrinsics_vec256_add64(a430, - Lib_IntVector_Intrinsics_vec256_mul64(r11, r13)); - Lib_IntVector_Intrinsics_vec256 - a050 = - Lib_IntVector_Intrinsics_vec256_add64(a040, - Lib_IntVector_Intrinsics_vec256_mul64(r151, r14)); - Lib_IntVector_Intrinsics_vec256 - a150 = - Lib_IntVector_Intrinsics_vec256_add64(a140, - Lib_IntVector_Intrinsics_vec256_mul64(r152, r14)); - Lib_IntVector_Intrinsics_vec256 - a250 = - Lib_IntVector_Intrinsics_vec256_add64(a240, - Lib_IntVector_Intrinsics_vec256_mul64(r153, r14)); - Lib_IntVector_Intrinsics_vec256 - a350 = - Lib_IntVector_Intrinsics_vec256_add64(a340, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r14)); - Lib_IntVector_Intrinsics_vec256 - a450 = - Lib_IntVector_Intrinsics_vec256_add64(a440, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r14)); - Lib_IntVector_Intrinsics_vec256 t00 = a050; - Lib_IntVector_Intrinsics_vec256 t10 = a150; - Lib_IntVector_Intrinsics_vec256 t20 = a250; - Lib_IntVector_Intrinsics_vec256 t30 = a350; - Lib_IntVector_Intrinsics_vec256 t40 = a450; - Lib_IntVector_Intrinsics_vec256 - mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260); - Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260); - Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00); - Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, 
z10); - Lib_IntVector_Intrinsics_vec256 - z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5); - Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260); - Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260); - Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010); - Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12); - Lib_IntVector_Intrinsics_vec256 - z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260); - Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260); - Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020); - Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130); - Lib_IntVector_Intrinsics_vec256 - z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260); - Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030); - Lib_IntVector_Intrinsics_vec256 r20 = x020; - Lib_IntVector_Intrinsics_vec256 r21 = x120; - Lib_IntVector_Intrinsics_vec256 r22 = x210; - Lib_IntVector_Intrinsics_vec256 r23 = x320; - Lib_IntVector_Intrinsics_vec256 r24 = x420; - Lib_IntVector_Intrinsics_vec256 a011 = Lib_IntVector_Intrinsics_vec256_mul64(r10, r20); - Lib_IntVector_Intrinsics_vec256 a111 = Lib_IntVector_Intrinsics_vec256_mul64(r11, r20); - Lib_IntVector_Intrinsics_vec256 a211 = Lib_IntVector_Intrinsics_vec256_mul64(r12, r20); - Lib_IntVector_Intrinsics_vec256 a311 = Lib_IntVector_Intrinsics_vec256_mul64(r13, r20); - Lib_IntVector_Intrinsics_vec256 a411 = Lib_IntVector_Intrinsics_vec256_mul64(r14, r20); - Lib_IntVector_Intrinsics_vec256 - a021 = - Lib_IntVector_Intrinsics_vec256_add64(a011, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r21)); - Lib_IntVector_Intrinsics_vec256 - a121 = - Lib_IntVector_Intrinsics_vec256_add64(a111, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r21)); - Lib_IntVector_Intrinsics_vec256 - a221 = - Lib_IntVector_Intrinsics_vec256_add64(a211, - Lib_IntVector_Intrinsics_vec256_mul64(r11, r21)); - Lib_IntVector_Intrinsics_vec256 - a321 = - Lib_IntVector_Intrinsics_vec256_add64(a311, - Lib_IntVector_Intrinsics_vec256_mul64(r12, r21)); - Lib_IntVector_Intrinsics_vec256 - a421 = - Lib_IntVector_Intrinsics_vec256_add64(a411, - Lib_IntVector_Intrinsics_vec256_mul64(r13, r21)); - Lib_IntVector_Intrinsics_vec256 - a031 = - Lib_IntVector_Intrinsics_vec256_add64(a021, - Lib_IntVector_Intrinsics_vec256_mul64(r153, r22)); - Lib_IntVector_Intrinsics_vec256 - a131 = - Lib_IntVector_Intrinsics_vec256_add64(a121, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r22)); - Lib_IntVector_Intrinsics_vec256 - a231 = - Lib_IntVector_Intrinsics_vec256_add64(a221, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r22)); - Lib_IntVector_Intrinsics_vec256 - a331 = - 
Lib_IntVector_Intrinsics_vec256_add64(a321, - Lib_IntVector_Intrinsics_vec256_mul64(r11, r22)); - Lib_IntVector_Intrinsics_vec256 - a431 = - Lib_IntVector_Intrinsics_vec256_add64(a421, - Lib_IntVector_Intrinsics_vec256_mul64(r12, r22)); - Lib_IntVector_Intrinsics_vec256 - a041 = - Lib_IntVector_Intrinsics_vec256_add64(a031, - Lib_IntVector_Intrinsics_vec256_mul64(r152, r23)); - Lib_IntVector_Intrinsics_vec256 - a141 = - Lib_IntVector_Intrinsics_vec256_add64(a131, - Lib_IntVector_Intrinsics_vec256_mul64(r153, r23)); - Lib_IntVector_Intrinsics_vec256 - a241 = - Lib_IntVector_Intrinsics_vec256_add64(a231, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r23)); - Lib_IntVector_Intrinsics_vec256 - a341 = - Lib_IntVector_Intrinsics_vec256_add64(a331, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r23)); - Lib_IntVector_Intrinsics_vec256 - a441 = - Lib_IntVector_Intrinsics_vec256_add64(a431, - Lib_IntVector_Intrinsics_vec256_mul64(r11, r23)); - Lib_IntVector_Intrinsics_vec256 - a051 = - Lib_IntVector_Intrinsics_vec256_add64(a041, - Lib_IntVector_Intrinsics_vec256_mul64(r151, r24)); - Lib_IntVector_Intrinsics_vec256 - a151 = - Lib_IntVector_Intrinsics_vec256_add64(a141, - Lib_IntVector_Intrinsics_vec256_mul64(r152, r24)); - Lib_IntVector_Intrinsics_vec256 - a251 = - Lib_IntVector_Intrinsics_vec256_add64(a241, - Lib_IntVector_Intrinsics_vec256_mul64(r153, r24)); - Lib_IntVector_Intrinsics_vec256 - a351 = - Lib_IntVector_Intrinsics_vec256_add64(a341, - Lib_IntVector_Intrinsics_vec256_mul64(r154, r24)); - Lib_IntVector_Intrinsics_vec256 - a451 = - Lib_IntVector_Intrinsics_vec256_add64(a441, - Lib_IntVector_Intrinsics_vec256_mul64(r10, r24)); - Lib_IntVector_Intrinsics_vec256 t01 = a051; - Lib_IntVector_Intrinsics_vec256 t11 = a151; - Lib_IntVector_Intrinsics_vec256 t21 = a251; - Lib_IntVector_Intrinsics_vec256 t31 = a351; - Lib_IntVector_Intrinsics_vec256 t41 = a451; - Lib_IntVector_Intrinsics_vec256 - mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x03 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261); - Lib_IntVector_Intrinsics_vec256 x33 = Lib_IntVector_Intrinsics_vec256_and(t31, mask261); - Lib_IntVector_Intrinsics_vec256 x13 = Lib_IntVector_Intrinsics_vec256_add64(t11, z04); - Lib_IntVector_Intrinsics_vec256 x43 = Lib_IntVector_Intrinsics_vec256_add64(t41, z14); - Lib_IntVector_Intrinsics_vec256 - z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z111, t6); - Lib_IntVector_Intrinsics_vec256 x111 = Lib_IntVector_Intrinsics_vec256_and(x13, mask261); - Lib_IntVector_Intrinsics_vec256 x411 = Lib_IntVector_Intrinsics_vec256_and(x43, mask261); - Lib_IntVector_Intrinsics_vec256 x22 = Lib_IntVector_Intrinsics_vec256_add64(t21, z011); - Lib_IntVector_Intrinsics_vec256 x011 = Lib_IntVector_Intrinsics_vec256_add64(x03, z120); - Lib_IntVector_Intrinsics_vec256 - z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z131 = 
Lib_IntVector_Intrinsics_vec256_shift_right64(x011, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x211 = Lib_IntVector_Intrinsics_vec256_and(x22, mask261); - Lib_IntVector_Intrinsics_vec256 x021 = Lib_IntVector_Intrinsics_vec256_and(x011, mask261); - Lib_IntVector_Intrinsics_vec256 x311 = Lib_IntVector_Intrinsics_vec256_add64(x33, z021); - Lib_IntVector_Intrinsics_vec256 x121 = Lib_IntVector_Intrinsics_vec256_add64(x111, z131); - Lib_IntVector_Intrinsics_vec256 - z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x321 = Lib_IntVector_Intrinsics_vec256_and(x311, mask261); - Lib_IntVector_Intrinsics_vec256 x421 = Lib_IntVector_Intrinsics_vec256_add64(x411, z031); - Lib_IntVector_Intrinsics_vec256 r30 = x021; - Lib_IntVector_Intrinsics_vec256 r31 = x121; - Lib_IntVector_Intrinsics_vec256 r32 = x211; - Lib_IntVector_Intrinsics_vec256 r33 = x321; - Lib_IntVector_Intrinsics_vec256 r34 = x421; - Lib_IntVector_Intrinsics_vec256 - v12120 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r20, r10); - Lib_IntVector_Intrinsics_vec256 - v34340 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r40, r30); - Lib_IntVector_Intrinsics_vec256 - r12340 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34340, v12120); - Lib_IntVector_Intrinsics_vec256 - v12121 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r21, r11); - Lib_IntVector_Intrinsics_vec256 - v34341 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r41, r31); - Lib_IntVector_Intrinsics_vec256 - r12341 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34341, v12121); - Lib_IntVector_Intrinsics_vec256 - v12122 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r22, r12); - Lib_IntVector_Intrinsics_vec256 - v34342 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r42, r32); - Lib_IntVector_Intrinsics_vec256 - r12342 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34342, v12122); - Lib_IntVector_Intrinsics_vec256 - v12123 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r23, r13); - Lib_IntVector_Intrinsics_vec256 - v34343 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r43, r33); - Lib_IntVector_Intrinsics_vec256 - r12343 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34343, v12123); - Lib_IntVector_Intrinsics_vec256 - v12124 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r24, r14); - Lib_IntVector_Intrinsics_vec256 - v34344 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r44, r34); - Lib_IntVector_Intrinsics_vec256 - r12344 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34344, v12124); - Lib_IntVector_Intrinsics_vec256 - r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec256 - r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec256 - r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec256 - r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, (uint64_t)5U); - Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_mul64(r12340, a0); - Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_mul64(r12341, a0); - Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_mul64(r12342, a0); - Lib_IntVector_Intrinsics_vec256 a31 = Lib_IntVector_Intrinsics_vec256_mul64(r12343, a0); - Lib_IntVector_Intrinsics_vec256 a41 = Lib_IntVector_Intrinsics_vec256_mul64(r12344, a0); - Lib_IntVector_Intrinsics_vec256 - a02 = - Lib_IntVector_Intrinsics_vec256_add64(a01, - 
Lib_IntVector_Intrinsics_vec256_mul64(r123454, a1)); - Lib_IntVector_Intrinsics_vec256 - a12 = - Lib_IntVector_Intrinsics_vec256_add64(a11, - Lib_IntVector_Intrinsics_vec256_mul64(r12340, a1)); - Lib_IntVector_Intrinsics_vec256 - a22 = - Lib_IntVector_Intrinsics_vec256_add64(a21, - Lib_IntVector_Intrinsics_vec256_mul64(r12341, a1)); - Lib_IntVector_Intrinsics_vec256 - a32 = - Lib_IntVector_Intrinsics_vec256_add64(a31, - Lib_IntVector_Intrinsics_vec256_mul64(r12342, a1)); - Lib_IntVector_Intrinsics_vec256 - a42 = - Lib_IntVector_Intrinsics_vec256_add64(a41, - Lib_IntVector_Intrinsics_vec256_mul64(r12343, a1)); - Lib_IntVector_Intrinsics_vec256 - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r123453, a2)); - Lib_IntVector_Intrinsics_vec256 - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r123454, a2)); - Lib_IntVector_Intrinsics_vec256 - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r12340, a2)); - Lib_IntVector_Intrinsics_vec256 - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r12341, a2)); - Lib_IntVector_Intrinsics_vec256 - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r12342, a2)); - Lib_IntVector_Intrinsics_vec256 - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r123452, a3)); - Lib_IntVector_Intrinsics_vec256 - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r123453, a3)); - Lib_IntVector_Intrinsics_vec256 - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r123454, a3)); - Lib_IntVector_Intrinsics_vec256 - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r12340, a3)); - Lib_IntVector_Intrinsics_vec256 - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r12341, a3)); - Lib_IntVector_Intrinsics_vec256 - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r123451, a4)); - Lib_IntVector_Intrinsics_vec256 - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r123452, a4)); - Lib_IntVector_Intrinsics_vec256 - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r123453, a4)); - Lib_IntVector_Intrinsics_vec256 - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r123454, a4)); - Lib_IntVector_Intrinsics_vec256 - a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r12340, a4)); - Lib_IntVector_Intrinsics_vec256 t0 = a05; - Lib_IntVector_Intrinsics_vec256 t1 = a15; - Lib_IntVector_Intrinsics_vec256 t2 = a25; - Lib_IntVector_Intrinsics_vec256 t3 = a35; - Lib_IntVector_Intrinsics_vec256 t4 = a45; - Lib_IntVector_Intrinsics_vec256 - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26); - Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0); - 
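The a01..a45 ladder deleted above is a 5x5 schoolbook multiplication modulo 2^130 - 5: partial products that would land in limbs 5..8 wrap back into limbs 0..3 with a factor of 5, which is why a table r5[i] = 5 * r[i] is precomputed alongside r (note that r5[0] is never read). A compact scalar sketch under the same invariant the vector code maintains, namely limbs kept small enough that the unreduced 64-bit sums cannot overflow; the helper name fmul26 is hypothetical:

/* Unreduced product of two 5-limb field elements, radix 2^26,
   modulo 2^130 - 5; r5[i] must hold 5 * r[i]. */
static void fmul26(uint64_t out[5], const uint64_t a[5],
                   const uint64_t r[5], const uint64_t r5[5])
{
  int i;
  int j;
  for (i = 0; i < 5; i++)
  {
    uint64_t t = (uint64_t)0U;
    for (j = 0; j <= i; j++)
      t += a[j] * r[i - j];      /* in-range term: lands in limb i */
    for (j = i + 1; j < 5; j++)
      t += a[j] * r5[5 + i - j]; /* wrapped term: 2^130 == 5 mod p */
    out[i] = t;                  /* limbs still need a carry pass  */
  }
}

fmul_r4_normalize uses this shape twice to derive r^2 and r^3 from r (r^4 arrives precomputed in p), multiplies the accumulator lanes by r^4, r^3, r^2 and r (one power per lane), and finally sums the lanes horizontally via the interleave/add cascade further down.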
Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - Lib_IntVector_Intrinsics_vec256 - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z121 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z121); - Lib_IntVector_Intrinsics_vec256 - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - Lib_IntVector_Intrinsics_vec256 - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - Lib_IntVector_Intrinsics_vec256 o0 = x02; - Lib_IntVector_Intrinsics_vec256 o10 = x12; - Lib_IntVector_Intrinsics_vec256 o20 = x21; - Lib_IntVector_Intrinsics_vec256 o30 = x32; - Lib_IntVector_Intrinsics_vec256 o40 = x42; - Lib_IntVector_Intrinsics_vec256 - v00 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o0, o0); - Lib_IntVector_Intrinsics_vec256 v10 = Lib_IntVector_Intrinsics_vec256_add64(o0, v00); - Lib_IntVector_Intrinsics_vec256 - v10h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v10, v10); - Lib_IntVector_Intrinsics_vec256 v20 = Lib_IntVector_Intrinsics_vec256_add64(v10, v10h); - Lib_IntVector_Intrinsics_vec256 - v01 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o10, o10); - Lib_IntVector_Intrinsics_vec256 v11 = Lib_IntVector_Intrinsics_vec256_add64(o10, v01); - Lib_IntVector_Intrinsics_vec256 - v11h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v11, v11); - Lib_IntVector_Intrinsics_vec256 v21 = Lib_IntVector_Intrinsics_vec256_add64(v11, v11h); - Lib_IntVector_Intrinsics_vec256 - v02 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o20, o20); - Lib_IntVector_Intrinsics_vec256 v12 = Lib_IntVector_Intrinsics_vec256_add64(o20, v02); - Lib_IntVector_Intrinsics_vec256 - v12h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v12, v12); - Lib_IntVector_Intrinsics_vec256 v22 = Lib_IntVector_Intrinsics_vec256_add64(v12, v12h); - Lib_IntVector_Intrinsics_vec256 - v03 = Lib_IntVector_Intrinsics_vec256_interleave_high128(o30, o30); - Lib_IntVector_Intrinsics_vec256 v13 = Lib_IntVector_Intrinsics_vec256_add64(o30, v03); - Lib_IntVector_Intrinsics_vec256 - v13h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v13, v13); - Lib_IntVector_Intrinsics_vec256 v23 = Lib_IntVector_Intrinsics_vec256_add64(v13, v13h); - Lib_IntVector_Intrinsics_vec256 - v04 = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(o40, o40); - Lib_IntVector_Intrinsics_vec256 v14 = Lib_IntVector_Intrinsics_vec256_add64(o40, v04); - Lib_IntVector_Intrinsics_vec256 - v14h = Lib_IntVector_Intrinsics_vec256_interleave_high64(v14, v14); - Lib_IntVector_Intrinsics_vec256 v24 = Lib_IntVector_Intrinsics_vec256_add64(v14, v14h); - Lib_IntVector_Intrinsics_vec256 - l = Lib_IntVector_Intrinsics_vec256_add64(v20, Lib_IntVector_Intrinsics_vec256_zero); - Lib_IntVector_Intrinsics_vec256 - tmp0 = - Lib_IntVector_Intrinsics_vec256_and(l, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l0 = Lib_IntVector_Intrinsics_vec256_add64(v21, c0); - Lib_IntVector_Intrinsics_vec256 - tmp1 = - Lib_IntVector_Intrinsics_vec256_and(l0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(v22, c1); - Lib_IntVector_Intrinsics_vec256 - tmp2 = - Lib_IntVector_Intrinsics_vec256_and(l1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(v23, c2); - Lib_IntVector_Intrinsics_vec256 - tmp3 = - Lib_IntVector_Intrinsics_vec256_and(l2, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(v24, c3); - Lib_IntVector_Intrinsics_vec256 - tmp4 = - Lib_IntVector_Intrinsics_vec256_and(l3, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - o00 = - Lib_IntVector_Intrinsics_vec256_add64(tmp0, - Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U)); - Lib_IntVector_Intrinsics_vec256 o1 = tmp1; - Lib_IntVector_Intrinsics_vec256 o2 = tmp2; - Lib_IntVector_Intrinsics_vec256 o3 = tmp3; - Lib_IntVector_Intrinsics_vec256 o4 = tmp4; - out[0U] = o00; - out[1U] = o1; - out[2U] = o2; - out[3U] = o3; - out[4U] = o4; -} - -void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key) -{ - Lib_IntVector_Intrinsics_vec256 *acc = ctx; - Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U; - uint8_t *kr = key; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - uint64_t mask0; - uint64_t mask1; - uint64_t lo1; - uint64_t hi1; - Lib_IntVector_Intrinsics_vec256 *r; - Lib_IntVector_Intrinsics_vec256 *r5; - Lib_IntVector_Intrinsics_vec256 *rn; - Lib_IntVector_Intrinsics_vec256 *rn_5; - Lib_IntVector_Intrinsics_vec256 r_vec0; - Lib_IntVector_Intrinsics_vec256 r_vec1; - Lib_IntVector_Intrinsics_vec256 f00; - Lib_IntVector_Intrinsics_vec256 f15; - Lib_IntVector_Intrinsics_vec256 f25; - Lib_IntVector_Intrinsics_vec256 f30; - Lib_IntVector_Intrinsics_vec256 f40; - Lib_IntVector_Intrinsics_vec256 f0; - Lib_IntVector_Intrinsics_vec256 f1; - Lib_IntVector_Intrinsics_vec256 f2; - Lib_IntVector_Intrinsics_vec256 f3; - Lib_IntVector_Intrinsics_vec256 f4; - Lib_IntVector_Intrinsics_vec256 f200; - 
Lib_IntVector_Intrinsics_vec256 f210; - Lib_IntVector_Intrinsics_vec256 f220; - Lib_IntVector_Intrinsics_vec256 f230; - Lib_IntVector_Intrinsics_vec256 f240; - Lib_IntVector_Intrinsics_vec256 r00; - Lib_IntVector_Intrinsics_vec256 r10; - Lib_IntVector_Intrinsics_vec256 r20; - Lib_IntVector_Intrinsics_vec256 r30; - Lib_IntVector_Intrinsics_vec256 r40; - Lib_IntVector_Intrinsics_vec256 r510; - Lib_IntVector_Intrinsics_vec256 r520; - Lib_IntVector_Intrinsics_vec256 r530; - Lib_IntVector_Intrinsics_vec256 r540; - Lib_IntVector_Intrinsics_vec256 f100; - Lib_IntVector_Intrinsics_vec256 f110; - Lib_IntVector_Intrinsics_vec256 f120; - Lib_IntVector_Intrinsics_vec256 f130; - Lib_IntVector_Intrinsics_vec256 f140; - Lib_IntVector_Intrinsics_vec256 a00; - Lib_IntVector_Intrinsics_vec256 a10; - Lib_IntVector_Intrinsics_vec256 a20; - Lib_IntVector_Intrinsics_vec256 a30; - Lib_IntVector_Intrinsics_vec256 a40; - Lib_IntVector_Intrinsics_vec256 a010; - Lib_IntVector_Intrinsics_vec256 a110; - Lib_IntVector_Intrinsics_vec256 a210; - Lib_IntVector_Intrinsics_vec256 a310; - Lib_IntVector_Intrinsics_vec256 a410; - Lib_IntVector_Intrinsics_vec256 a020; - Lib_IntVector_Intrinsics_vec256 a120; - Lib_IntVector_Intrinsics_vec256 a220; - Lib_IntVector_Intrinsics_vec256 a320; - Lib_IntVector_Intrinsics_vec256 a420; - Lib_IntVector_Intrinsics_vec256 a030; - Lib_IntVector_Intrinsics_vec256 a130; - Lib_IntVector_Intrinsics_vec256 a230; - Lib_IntVector_Intrinsics_vec256 a330; - Lib_IntVector_Intrinsics_vec256 a430; - Lib_IntVector_Intrinsics_vec256 a040; - Lib_IntVector_Intrinsics_vec256 a140; - Lib_IntVector_Intrinsics_vec256 a240; - Lib_IntVector_Intrinsics_vec256 a340; - Lib_IntVector_Intrinsics_vec256 a440; - Lib_IntVector_Intrinsics_vec256 t00; - Lib_IntVector_Intrinsics_vec256 t10; - Lib_IntVector_Intrinsics_vec256 t20; - Lib_IntVector_Intrinsics_vec256 t30; - Lib_IntVector_Intrinsics_vec256 t40; - Lib_IntVector_Intrinsics_vec256 mask260; - Lib_IntVector_Intrinsics_vec256 z00; - Lib_IntVector_Intrinsics_vec256 z10; - Lib_IntVector_Intrinsics_vec256 x00; - Lib_IntVector_Intrinsics_vec256 x30; - Lib_IntVector_Intrinsics_vec256 x10; - Lib_IntVector_Intrinsics_vec256 x40; - Lib_IntVector_Intrinsics_vec256 z010; - Lib_IntVector_Intrinsics_vec256 z110; - Lib_IntVector_Intrinsics_vec256 t5; - Lib_IntVector_Intrinsics_vec256 z120; - Lib_IntVector_Intrinsics_vec256 x110; - Lib_IntVector_Intrinsics_vec256 x410; - Lib_IntVector_Intrinsics_vec256 x20; - Lib_IntVector_Intrinsics_vec256 x010; - Lib_IntVector_Intrinsics_vec256 z020; - Lib_IntVector_Intrinsics_vec256 z130; - Lib_IntVector_Intrinsics_vec256 x210; - Lib_IntVector_Intrinsics_vec256 x020; - Lib_IntVector_Intrinsics_vec256 x310; - Lib_IntVector_Intrinsics_vec256 x120; - Lib_IntVector_Intrinsics_vec256 z030; - Lib_IntVector_Intrinsics_vec256 x320; - Lib_IntVector_Intrinsics_vec256 x420; - Lib_IntVector_Intrinsics_vec256 o00; - Lib_IntVector_Intrinsics_vec256 o10; - Lib_IntVector_Intrinsics_vec256 o20; - Lib_IntVector_Intrinsics_vec256 o30; - Lib_IntVector_Intrinsics_vec256 o40; - Lib_IntVector_Intrinsics_vec256 f201; - Lib_IntVector_Intrinsics_vec256 f211; - Lib_IntVector_Intrinsics_vec256 f221; - Lib_IntVector_Intrinsics_vec256 f231; - Lib_IntVector_Intrinsics_vec256 f241; - Lib_IntVector_Intrinsics_vec256 r0; - Lib_IntVector_Intrinsics_vec256 r1; - Lib_IntVector_Intrinsics_vec256 r2; - Lib_IntVector_Intrinsics_vec256 r3; - Lib_IntVector_Intrinsics_vec256 r4; - Lib_IntVector_Intrinsics_vec256 r51; - Lib_IntVector_Intrinsics_vec256 r52; - Lib_IntVector_Intrinsics_vec256 
r53; - Lib_IntVector_Intrinsics_vec256 r54; - Lib_IntVector_Intrinsics_vec256 f10; - Lib_IntVector_Intrinsics_vec256 f11; - Lib_IntVector_Intrinsics_vec256 f12; - Lib_IntVector_Intrinsics_vec256 f13; - Lib_IntVector_Intrinsics_vec256 f14; - Lib_IntVector_Intrinsics_vec256 a0; - Lib_IntVector_Intrinsics_vec256 a1; - Lib_IntVector_Intrinsics_vec256 a2; - Lib_IntVector_Intrinsics_vec256 a3; - Lib_IntVector_Intrinsics_vec256 a4; - Lib_IntVector_Intrinsics_vec256 a01; - Lib_IntVector_Intrinsics_vec256 a11; - Lib_IntVector_Intrinsics_vec256 a21; - Lib_IntVector_Intrinsics_vec256 a31; - Lib_IntVector_Intrinsics_vec256 a41; - Lib_IntVector_Intrinsics_vec256 a02; - Lib_IntVector_Intrinsics_vec256 a12; - Lib_IntVector_Intrinsics_vec256 a22; - Lib_IntVector_Intrinsics_vec256 a32; - Lib_IntVector_Intrinsics_vec256 a42; - Lib_IntVector_Intrinsics_vec256 a03; - Lib_IntVector_Intrinsics_vec256 a13; - Lib_IntVector_Intrinsics_vec256 a23; - Lib_IntVector_Intrinsics_vec256 a33; - Lib_IntVector_Intrinsics_vec256 a43; - Lib_IntVector_Intrinsics_vec256 a04; - Lib_IntVector_Intrinsics_vec256 a14; - Lib_IntVector_Intrinsics_vec256 a24; - Lib_IntVector_Intrinsics_vec256 a34; - Lib_IntVector_Intrinsics_vec256 a44; - Lib_IntVector_Intrinsics_vec256 t0; - Lib_IntVector_Intrinsics_vec256 t1; - Lib_IntVector_Intrinsics_vec256 t2; - Lib_IntVector_Intrinsics_vec256 t3; - Lib_IntVector_Intrinsics_vec256 t4; - Lib_IntVector_Intrinsics_vec256 mask26; - Lib_IntVector_Intrinsics_vec256 z0; - Lib_IntVector_Intrinsics_vec256 z1; - Lib_IntVector_Intrinsics_vec256 x0; - Lib_IntVector_Intrinsics_vec256 x3; - Lib_IntVector_Intrinsics_vec256 x1; - Lib_IntVector_Intrinsics_vec256 x4; - Lib_IntVector_Intrinsics_vec256 z01; - Lib_IntVector_Intrinsics_vec256 z11; - Lib_IntVector_Intrinsics_vec256 t; - Lib_IntVector_Intrinsics_vec256 z12; - Lib_IntVector_Intrinsics_vec256 x11; - Lib_IntVector_Intrinsics_vec256 x41; - Lib_IntVector_Intrinsics_vec256 x2; - Lib_IntVector_Intrinsics_vec256 x01; - Lib_IntVector_Intrinsics_vec256 z02; - Lib_IntVector_Intrinsics_vec256 z13; - Lib_IntVector_Intrinsics_vec256 x21; - Lib_IntVector_Intrinsics_vec256 x02; - Lib_IntVector_Intrinsics_vec256 x31; - Lib_IntVector_Intrinsics_vec256 x12; - Lib_IntVector_Intrinsics_vec256 z03; - Lib_IntVector_Intrinsics_vec256 x32; - Lib_IntVector_Intrinsics_vec256 x42; - Lib_IntVector_Intrinsics_vec256 o0; - Lib_IntVector_Intrinsics_vec256 o1; - Lib_IntVector_Intrinsics_vec256 o2; - Lib_IntVector_Intrinsics_vec256 o3; - Lib_IntVector_Intrinsics_vec256 o4; - Lib_IntVector_Intrinsics_vec256 f20; - Lib_IntVector_Intrinsics_vec256 f21; - Lib_IntVector_Intrinsics_vec256 f22; - Lib_IntVector_Intrinsics_vec256 f23; - Lib_IntVector_Intrinsics_vec256 f24; - acc[0U] = Lib_IntVector_Intrinsics_vec256_zero; - acc[1U] = Lib_IntVector_Intrinsics_vec256_zero; - acc[2U] = Lib_IntVector_Intrinsics_vec256_zero; - acc[3U] = Lib_IntVector_Intrinsics_vec256_zero; - acc[4U] = Lib_IntVector_Intrinsics_vec256_zero; - u0 = load64_le(kr); - lo = u0; - u = load64_le(kr + (uint32_t)8U); - hi = u; - mask0 = (uint64_t)0x0ffffffc0fffffffU; - mask1 = (uint64_t)0x0ffffffc0ffffffcU; - lo1 = lo & mask0; - hi1 = hi & mask1; - r = pre; - r5 = pre + (uint32_t)5U; - rn = pre + (uint32_t)10U; - rn_5 = pre + (uint32_t)15U; - r_vec0 = Lib_IntVector_Intrinsics_vec256_load64(lo1); - r_vec1 = Lib_IntVector_Intrinsics_vec256_load64(hi1); - f00 = - Lib_IntVector_Intrinsics_vec256_and(r_vec0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - f15 = - 
Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - f25 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(r_vec1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, (uint32_t)40U); - f0 = f00; - f1 = f15; - f2 = f25; - f3 = f30; - f4 = f40; - r[0U] = f0; - r[1U] = f1; - r[2U] = f2; - r[3U] = f3; - r[4U] = f4; - f200 = r[0U]; - f210 = r[1U]; - f220 = r[2U]; - f230 = r[3U]; - f240 = r[4U]; - r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, (uint64_t)5U); - r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, (uint64_t)5U); - r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, (uint64_t)5U); - r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, (uint64_t)5U); - r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, (uint64_t)5U); - r00 = r[0U]; - r10 = r[1U]; - r20 = r[2U]; - r30 = r[3U]; - r40 = r[4U]; - r510 = r5[1U]; - r520 = r5[2U]; - r530 = r5[3U]; - r540 = r5[4U]; - f100 = r[0U]; - f110 = r[1U]; - f120 = r[2U]; - f130 = r[3U]; - f140 = r[4U]; - a00 = Lib_IntVector_Intrinsics_vec256_mul64(r00, f100); - a10 = Lib_IntVector_Intrinsics_vec256_mul64(r10, f100); - a20 = Lib_IntVector_Intrinsics_vec256_mul64(r20, f100); - a30 = Lib_IntVector_Intrinsics_vec256_mul64(r30, f100); - a40 = Lib_IntVector_Intrinsics_vec256_mul64(r40, f100); - a010 = - Lib_IntVector_Intrinsics_vec256_add64(a00, - Lib_IntVector_Intrinsics_vec256_mul64(r540, f110)); - a110 = - Lib_IntVector_Intrinsics_vec256_add64(a10, - Lib_IntVector_Intrinsics_vec256_mul64(r00, f110)); - a210 = - Lib_IntVector_Intrinsics_vec256_add64(a20, - Lib_IntVector_Intrinsics_vec256_mul64(r10, f110)); - a310 = - Lib_IntVector_Intrinsics_vec256_add64(a30, - Lib_IntVector_Intrinsics_vec256_mul64(r20, f110)); - a410 = - Lib_IntVector_Intrinsics_vec256_add64(a40, - Lib_IntVector_Intrinsics_vec256_mul64(r30, f110)); - a020 = - Lib_IntVector_Intrinsics_vec256_add64(a010, - Lib_IntVector_Intrinsics_vec256_mul64(r530, f120)); - a120 = - Lib_IntVector_Intrinsics_vec256_add64(a110, - Lib_IntVector_Intrinsics_vec256_mul64(r540, f120)); - a220 = - Lib_IntVector_Intrinsics_vec256_add64(a210, - Lib_IntVector_Intrinsics_vec256_mul64(r00, f120)); - a320 = - Lib_IntVector_Intrinsics_vec256_add64(a310, - Lib_IntVector_Intrinsics_vec256_mul64(r10, f120)); - a420 = - Lib_IntVector_Intrinsics_vec256_add64(a410, - Lib_IntVector_Intrinsics_vec256_mul64(r20, f120)); - a030 = - Lib_IntVector_Intrinsics_vec256_add64(a020, - Lib_IntVector_Intrinsics_vec256_mul64(r520, f130)); - a130 = - Lib_IntVector_Intrinsics_vec256_add64(a120, - Lib_IntVector_Intrinsics_vec256_mul64(r530, f130)); - a230 = - Lib_IntVector_Intrinsics_vec256_add64(a220, - Lib_IntVector_Intrinsics_vec256_mul64(r540, f130)); - a330 = - Lib_IntVector_Intrinsics_vec256_add64(a320, - Lib_IntVector_Intrinsics_vec256_mul64(r00, f130)); - a430 = - Lib_IntVector_Intrinsics_vec256_add64(a420, - Lib_IntVector_Intrinsics_vec256_mul64(r10, f130)); - a040 = - Lib_IntVector_Intrinsics_vec256_add64(a030, - Lib_IntVector_Intrinsics_vec256_mul64(r510, f140)); - a140 = - 
Lib_IntVector_Intrinsics_vec256_add64(a130, - Lib_IntVector_Intrinsics_vec256_mul64(r520, f140)); - a240 = - Lib_IntVector_Intrinsics_vec256_add64(a230, - Lib_IntVector_Intrinsics_vec256_mul64(r530, f140)); - a340 = - Lib_IntVector_Intrinsics_vec256_add64(a330, - Lib_IntVector_Intrinsics_vec256_mul64(r540, f140)); - a440 = - Lib_IntVector_Intrinsics_vec256_add64(a430, - Lib_IntVector_Intrinsics_vec256_mul64(r00, f140)); - t00 = a040; - t10 = a140; - t20 = a240; - t30 = a340; - t40 = a440; - mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U); - z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U); - x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260); - x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260); - x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00); - x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10); - z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U); - z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U); - t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U); - z120 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5); - x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260); - x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260); - x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010); - x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z120); - z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U); - z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U); - x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260); - x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260); - x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020); - x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130); - z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U); - x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260); - x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030); - o00 = x020; - o10 = x120; - o20 = x210; - o30 = x320; - o40 = x420; - rn[0U] = o00; - rn[1U] = o10; - rn[2U] = o20; - rn[3U] = o30; - rn[4U] = o40; - f201 = rn[0U]; - f211 = rn[1U]; - f221 = rn[2U]; - f231 = rn[3U]; - f241 = rn[4U]; - rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, (uint64_t)5U); - rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, (uint64_t)5U); - rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, (uint64_t)5U); - rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, (uint64_t)5U); - rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, (uint64_t)5U); - r0 = rn[0U]; - r1 = rn[1U]; - r2 = rn[2U]; - r3 = rn[3U]; - r4 = rn[4U]; - r51 = rn_5[1U]; - r52 = rn_5[2U]; - r53 = rn_5[3U]; - r54 = rn_5[4U]; - f10 = rn[0U]; - f11 = rn[1U]; - f12 = rn[2U]; - f13 = rn[3U]; - f14 = rn[4U]; - a0 = Lib_IntVector_Intrinsics_vec256_mul64(r0, f10); - a1 = Lib_IntVector_Intrinsics_vec256_mul64(r1, f10); - a2 = Lib_IntVector_Intrinsics_vec256_mul64(r2, f10); - a3 = Lib_IntVector_Intrinsics_vec256_mul64(r3, f10); - a4 = Lib_IntVector_Intrinsics_vec256_mul64(r4, f10); - a01 = - Lib_IntVector_Intrinsics_vec256_add64(a0, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f11)); - a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, Lib_IntVector_Intrinsics_vec256_mul64(r0, f11)); - a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, Lib_IntVector_Intrinsics_vec256_mul64(r1, f11)); - a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, 
Lib_IntVector_Intrinsics_vec256_mul64(r2, f11)); - a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, Lib_IntVector_Intrinsics_vec256_mul64(r3, f11)); - a02 = - Lib_IntVector_Intrinsics_vec256_add64(a01, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f12)); - a12 = - Lib_IntVector_Intrinsics_vec256_add64(a11, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f12)); - a22 = - Lib_IntVector_Intrinsics_vec256_add64(a21, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f12)); - a32 = - Lib_IntVector_Intrinsics_vec256_add64(a31, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f12)); - a42 = - Lib_IntVector_Intrinsics_vec256_add64(a41, - Lib_IntVector_Intrinsics_vec256_mul64(r2, f12)); - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r52, f13)); - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f13)); - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f13)); - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f13)); - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f13)); - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r51, f14)); - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r52, f14)); - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f14)); - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f14)); - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f14)); - t0 = a04; - t1 = a14; - t2 = a24; - t3 = a34; - t4 = a44; - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - rn[0U] = o0; - rn[1U] = o1; - rn[2U] = o2; - rn[3U] = o3; - rn[4U] = o4; - f20 = rn[0U]; - f21 = rn[1U]; - f22 = rn[2U]; - f23 = rn[3U]; - f24 = rn[4U]; - rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f20, 
(uint64_t)5U); - rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, (uint64_t)5U); - rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, (uint64_t)5U); - rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, (uint64_t)5U); - rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, (uint64_t)5U); -} - -void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text) -{ - Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 *acc = ctx; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - uint64_t u0 = load64_le(text); - uint64_t lo = u0; - uint64_t u = load64_le(text + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo); - Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi); - Lib_IntVector_Intrinsics_vec256 - f010 = - Lib_IntVector_Intrinsics_vec256_and(f0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f110 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f20 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec256 - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 f01 = f010; - Lib_IntVector_Intrinsics_vec256 f111 = f110; - Lib_IntVector_Intrinsics_vec256 f2 = f20; - Lib_IntVector_Intrinsics_vec256 f3 = f30; - Lib_IntVector_Intrinsics_vec256 f41 = f40; - uint64_t b; - Lib_IntVector_Intrinsics_vec256 mask; - Lib_IntVector_Intrinsics_vec256 f4; - Lib_IntVector_Intrinsics_vec256 *r; - Lib_IntVector_Intrinsics_vec256 *r5; - Lib_IntVector_Intrinsics_vec256 r0; - Lib_IntVector_Intrinsics_vec256 r1; - Lib_IntVector_Intrinsics_vec256 r2; - Lib_IntVector_Intrinsics_vec256 r3; - Lib_IntVector_Intrinsics_vec256 r4; - Lib_IntVector_Intrinsics_vec256 r51; - Lib_IntVector_Intrinsics_vec256 r52; - Lib_IntVector_Intrinsics_vec256 r53; - Lib_IntVector_Intrinsics_vec256 r54; - Lib_IntVector_Intrinsics_vec256 f10; - Lib_IntVector_Intrinsics_vec256 f11; - Lib_IntVector_Intrinsics_vec256 f12; - Lib_IntVector_Intrinsics_vec256 f13; - Lib_IntVector_Intrinsics_vec256 f14; - Lib_IntVector_Intrinsics_vec256 a0; - Lib_IntVector_Intrinsics_vec256 a1; - Lib_IntVector_Intrinsics_vec256 a2; - Lib_IntVector_Intrinsics_vec256 a3; - Lib_IntVector_Intrinsics_vec256 a4; - Lib_IntVector_Intrinsics_vec256 a01; - Lib_IntVector_Intrinsics_vec256 a11; - Lib_IntVector_Intrinsics_vec256 a21; - Lib_IntVector_Intrinsics_vec256 a31; - Lib_IntVector_Intrinsics_vec256 a41; - Lib_IntVector_Intrinsics_vec256 a02; - Lib_IntVector_Intrinsics_vec256 a12; - Lib_IntVector_Intrinsics_vec256 a22; - Lib_IntVector_Intrinsics_vec256 a32; - Lib_IntVector_Intrinsics_vec256 a42; - Lib_IntVector_Intrinsics_vec256 a03; - Lib_IntVector_Intrinsics_vec256 a13; - Lib_IntVector_Intrinsics_vec256 a23; - Lib_IntVector_Intrinsics_vec256 
a33; - Lib_IntVector_Intrinsics_vec256 a43; - Lib_IntVector_Intrinsics_vec256 a04; - Lib_IntVector_Intrinsics_vec256 a14; - Lib_IntVector_Intrinsics_vec256 a24; - Lib_IntVector_Intrinsics_vec256 a34; - Lib_IntVector_Intrinsics_vec256 a44; - Lib_IntVector_Intrinsics_vec256 a05; - Lib_IntVector_Intrinsics_vec256 a15; - Lib_IntVector_Intrinsics_vec256 a25; - Lib_IntVector_Intrinsics_vec256 a35; - Lib_IntVector_Intrinsics_vec256 a45; - Lib_IntVector_Intrinsics_vec256 a06; - Lib_IntVector_Intrinsics_vec256 a16; - Lib_IntVector_Intrinsics_vec256 a26; - Lib_IntVector_Intrinsics_vec256 a36; - Lib_IntVector_Intrinsics_vec256 a46; - Lib_IntVector_Intrinsics_vec256 t0; - Lib_IntVector_Intrinsics_vec256 t1; - Lib_IntVector_Intrinsics_vec256 t2; - Lib_IntVector_Intrinsics_vec256 t3; - Lib_IntVector_Intrinsics_vec256 t4; - Lib_IntVector_Intrinsics_vec256 mask26; - Lib_IntVector_Intrinsics_vec256 z0; - Lib_IntVector_Intrinsics_vec256 z1; - Lib_IntVector_Intrinsics_vec256 x0; - Lib_IntVector_Intrinsics_vec256 x3; - Lib_IntVector_Intrinsics_vec256 x1; - Lib_IntVector_Intrinsics_vec256 x4; - Lib_IntVector_Intrinsics_vec256 z01; - Lib_IntVector_Intrinsics_vec256 z11; - Lib_IntVector_Intrinsics_vec256 t; - Lib_IntVector_Intrinsics_vec256 z12; - Lib_IntVector_Intrinsics_vec256 x11; - Lib_IntVector_Intrinsics_vec256 x41; - Lib_IntVector_Intrinsics_vec256 x2; - Lib_IntVector_Intrinsics_vec256 x01; - Lib_IntVector_Intrinsics_vec256 z02; - Lib_IntVector_Intrinsics_vec256 z13; - Lib_IntVector_Intrinsics_vec256 x21; - Lib_IntVector_Intrinsics_vec256 x02; - Lib_IntVector_Intrinsics_vec256 x31; - Lib_IntVector_Intrinsics_vec256 x12; - Lib_IntVector_Intrinsics_vec256 z03; - Lib_IntVector_Intrinsics_vec256 x32; - Lib_IntVector_Intrinsics_vec256 x42; - Lib_IntVector_Intrinsics_vec256 o0; - Lib_IntVector_Intrinsics_vec256 o1; - Lib_IntVector_Intrinsics_vec256 o2; - Lib_IntVector_Intrinsics_vec256 o3; - Lib_IntVector_Intrinsics_vec256 o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = Lib_IntVector_Intrinsics_vec256_load64(b); - f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask); - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13); - a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec256_mul64(r1, a01); - a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a11)); - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - 
Lib_IntVector_Intrinsics_vec256_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a21)); - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a31)); - a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a31)); - a06 = - Lib_IntVector_Intrinsics_vec256_add64(a05, - Lib_IntVector_Intrinsics_vec256_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec256_add64(a15, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec256_add64(a25, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a41)); - a36 = - Lib_IntVector_Intrinsics_vec256_add64(a35, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec256_add64(a45, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a41)); - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26); - x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0); - x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; -} - -void -Hacl_Poly1305_256_poly1305_update( - Lib_IntVector_Intrinsics_vec256 *ctx, - uint32_t len, - uint8_t *text -) -{ - Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 *acc = ctx; 
- uint32_t sz_block = (uint32_t)64U; - uint32_t len0 = len / sz_block * sz_block; - uint8_t *t0 = text; - uint32_t len1; - uint8_t *t10; - uint32_t nb0; - uint32_t rem; - if (len0 > (uint32_t)0U) - { - uint32_t bs = (uint32_t)64U; - uint8_t *text0 = t0; - Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc, text0); - { - uint32_t len10 = len0 - bs; - uint8_t *text1 = t0 + bs; - uint32_t nb = len10 / bs; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *block = text1 + i * bs; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block); - Lib_IntVector_Intrinsics_vec256 - hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U); - Lib_IntVector_Intrinsics_vec256 - mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi); - Lib_IntVector_Intrinsics_vec256 - m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi); - Lib_IntVector_Intrinsics_vec256 - m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U); - Lib_IntVector_Intrinsics_vec256 - m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U); - Lib_IntVector_Intrinsics_vec256 - m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1); - Lib_IntVector_Intrinsics_vec256 - t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1); - Lib_IntVector_Intrinsics_vec256 - t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3); - Lib_IntVector_Intrinsics_vec256 - t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U); - Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260); - Lib_IntVector_Intrinsics_vec256 - t11 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t11, mask260); - Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260); - Lib_IntVector_Intrinsics_vec256 - t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U); - Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260); - Lib_IntVector_Intrinsics_vec256 - o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 o00 = o5; - Lib_IntVector_Intrinsics_vec256 o11 = o10; - Lib_IntVector_Intrinsics_vec256 o21 = o20; - Lib_IntVector_Intrinsics_vec256 o31 = o30; - Lib_IntVector_Intrinsics_vec256 o41 = o40; - e[0U] = o00; - e[1U] = o11; - e[2U] = o21; - e[3U] = o31; - e[4U] = o41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b); - Lib_IntVector_Intrinsics_vec256 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U; - Lib_IntVector_Intrinsics_vec256 *rn5 = pre + (uint32_t)15U; - Lib_IntVector_Intrinsics_vec256 r0 = rn[0U]; - Lib_IntVector_Intrinsics_vec256 r1 = rn[1U]; - Lib_IntVector_Intrinsics_vec256 r2 = rn[2U]; - Lib_IntVector_Intrinsics_vec256 r3 = rn[3U]; - Lib_IntVector_Intrinsics_vec256 r4 = rn[4U]; - Lib_IntVector_Intrinsics_vec256 r51 = rn5[1U]; - Lib_IntVector_Intrinsics_vec256 r52 = rn5[2U]; - Lib_IntVector_Intrinsics_vec256 r53 = rn5[3U]; - Lib_IntVector_Intrinsics_vec256 r54 = rn5[4U]; - Lib_IntVector_Intrinsics_vec256 f10 = acc[0U]; - 
Lib_IntVector_Intrinsics_vec256 f110 = acc[1U]; - Lib_IntVector_Intrinsics_vec256 f120 = acc[2U]; - Lib_IntVector_Intrinsics_vec256 f130 = acc[3U]; - Lib_IntVector_Intrinsics_vec256 f140 = acc[4U]; - Lib_IntVector_Intrinsics_vec256 a0 = Lib_IntVector_Intrinsics_vec256_mul64(r0, f10); - Lib_IntVector_Intrinsics_vec256 a1 = Lib_IntVector_Intrinsics_vec256_mul64(r1, f10); - Lib_IntVector_Intrinsics_vec256 a2 = Lib_IntVector_Intrinsics_vec256_mul64(r2, f10); - Lib_IntVector_Intrinsics_vec256 a3 = Lib_IntVector_Intrinsics_vec256_mul64(r3, f10); - Lib_IntVector_Intrinsics_vec256 a4 = Lib_IntVector_Intrinsics_vec256_mul64(r4, f10); - Lib_IntVector_Intrinsics_vec256 - a01 = - Lib_IntVector_Intrinsics_vec256_add64(a0, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f110)); - Lib_IntVector_Intrinsics_vec256 - a11 = - Lib_IntVector_Intrinsics_vec256_add64(a1, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f110)); - Lib_IntVector_Intrinsics_vec256 - a21 = - Lib_IntVector_Intrinsics_vec256_add64(a2, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f110)); - Lib_IntVector_Intrinsics_vec256 - a31 = - Lib_IntVector_Intrinsics_vec256_add64(a3, - Lib_IntVector_Intrinsics_vec256_mul64(r2, f110)); - Lib_IntVector_Intrinsics_vec256 - a41 = - Lib_IntVector_Intrinsics_vec256_add64(a4, - Lib_IntVector_Intrinsics_vec256_mul64(r3, f110)); - Lib_IntVector_Intrinsics_vec256 - a02 = - Lib_IntVector_Intrinsics_vec256_add64(a01, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f120)); - Lib_IntVector_Intrinsics_vec256 - a12 = - Lib_IntVector_Intrinsics_vec256_add64(a11, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f120)); - Lib_IntVector_Intrinsics_vec256 - a22 = - Lib_IntVector_Intrinsics_vec256_add64(a21, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f120)); - Lib_IntVector_Intrinsics_vec256 - a32 = - Lib_IntVector_Intrinsics_vec256_add64(a31, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f120)); - Lib_IntVector_Intrinsics_vec256 - a42 = - Lib_IntVector_Intrinsics_vec256_add64(a41, - Lib_IntVector_Intrinsics_vec256_mul64(r2, f120)); - Lib_IntVector_Intrinsics_vec256 - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r52, f130)); - Lib_IntVector_Intrinsics_vec256 - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f130)); - Lib_IntVector_Intrinsics_vec256 - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f130)); - Lib_IntVector_Intrinsics_vec256 - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f130)); - Lib_IntVector_Intrinsics_vec256 - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r1, f130)); - Lib_IntVector_Intrinsics_vec256 - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r51, f140)); - Lib_IntVector_Intrinsics_vec256 - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r52, f140)); - Lib_IntVector_Intrinsics_vec256 - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r53, f140)); - Lib_IntVector_Intrinsics_vec256 - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r54, f140)); - Lib_IntVector_Intrinsics_vec256 - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r0, f140)); - Lib_IntVector_Intrinsics_vec256 t01 = a04; - Lib_IntVector_Intrinsics_vec256 t1 = a14; - Lib_IntVector_Intrinsics_vec256 t2 = a24; - 
Lib_IntVector_Intrinsics_vec256 t3 = a34; - Lib_IntVector_Intrinsics_vec256 t4 = a44; - Lib_IntVector_Intrinsics_vec256 - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26); - Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0); - Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - Lib_IntVector_Intrinsics_vec256 - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - Lib_IntVector_Intrinsics_vec256 - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - Lib_IntVector_Intrinsics_vec256 - x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - Lib_IntVector_Intrinsics_vec256 - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - Lib_IntVector_Intrinsics_vec256 o01 = x02; - Lib_IntVector_Intrinsics_vec256 o12 = x12; - Lib_IntVector_Intrinsics_vec256 o22 = x21; - Lib_IntVector_Intrinsics_vec256 o32 = x32; - Lib_IntVector_Intrinsics_vec256 o42 = x42; - acc[0U] = o01; - acc[1U] = o12; - acc[2U] = o22; - acc[3U] = o32; - acc[4U] = o42; - { - Lib_IntVector_Intrinsics_vec256 f100 = acc[0U]; - Lib_IntVector_Intrinsics_vec256 f11 = acc[1U]; - Lib_IntVector_Intrinsics_vec256 f12 = acc[2U]; - Lib_IntVector_Intrinsics_vec256 f13 = acc[3U]; - Lib_IntVector_Intrinsics_vec256 f14 = acc[4U]; - Lib_IntVector_Intrinsics_vec256 f20 = e[0U]; - Lib_IntVector_Intrinsics_vec256 f21 = e[1U]; - Lib_IntVector_Intrinsics_vec256 f22 = e[2U]; - Lib_IntVector_Intrinsics_vec256 f23 = e[3U]; - Lib_IntVector_Intrinsics_vec256 f24 = e[4U]; - Lib_IntVector_Intrinsics_vec256 - o0 = Lib_IntVector_Intrinsics_vec256_add64(f100, f20); - Lib_IntVector_Intrinsics_vec256 - o1 = Lib_IntVector_Intrinsics_vec256_add64(f11, f21); - Lib_IntVector_Intrinsics_vec256 - o2 = Lib_IntVector_Intrinsics_vec256_add64(f12, f22); - Lib_IntVector_Intrinsics_vec256 - o3 = Lib_IntVector_Intrinsics_vec256_add64(f13, f23); - 
Lib_IntVector_Intrinsics_vec256 - o4 = Lib_IntVector_Intrinsics_vec256_add64(f14, f24); - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - } - } - } - } - } - Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc, pre); - } - } - len1 = len - len0; - t10 = text + len0; - nb0 = len1 / (uint32_t)16U; - rem = len1 % (uint32_t)16U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb0; i++) - { - uint8_t *block = t10 + i * (uint32_t)16U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo); - Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi); - Lib_IntVector_Intrinsics_vec256 - f010 = - Lib_IntVector_Intrinsics_vec256_and(f0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f110 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f20 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - Lib_IntVector_Intrinsics_vec256 - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U); - Lib_IntVector_Intrinsics_vec256 f01 = f010; - Lib_IntVector_Intrinsics_vec256 f111 = f110; - Lib_IntVector_Intrinsics_vec256 f2 = f20; - Lib_IntVector_Intrinsics_vec256 f3 = f30; - Lib_IntVector_Intrinsics_vec256 f41 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b); - Lib_IntVector_Intrinsics_vec256 f4 = e[4U]; - e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask); - { - Lib_IntVector_Intrinsics_vec256 *r = pre; - Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U; - Lib_IntVector_Intrinsics_vec256 r0 = r[0U]; - Lib_IntVector_Intrinsics_vec256 r1 = r[1U]; - Lib_IntVector_Intrinsics_vec256 r2 = r[2U]; - Lib_IntVector_Intrinsics_vec256 r3 = r[3U]; - Lib_IntVector_Intrinsics_vec256 r4 = r[4U]; - Lib_IntVector_Intrinsics_vec256 r51 = r5[1U]; - Lib_IntVector_Intrinsics_vec256 r52 = r5[2U]; - Lib_IntVector_Intrinsics_vec256 r53 = r5[3U]; - Lib_IntVector_Intrinsics_vec256 r54 = r5[4U]; - Lib_IntVector_Intrinsics_vec256 f10 = e[0U]; - Lib_IntVector_Intrinsics_vec256 f11 = e[1U]; - Lib_IntVector_Intrinsics_vec256 f12 = e[2U]; - Lib_IntVector_Intrinsics_vec256 f13 = e[3U]; - Lib_IntVector_Intrinsics_vec256 f14 = e[4U]; - Lib_IntVector_Intrinsics_vec256 a0 = acc[0U]; - Lib_IntVector_Intrinsics_vec256 a1 = acc[1U]; - Lib_IntVector_Intrinsics_vec256 a2 = acc[2U]; - Lib_IntVector_Intrinsics_vec256 a3 = acc[3U]; - Lib_IntVector_Intrinsics_vec256 a4 = acc[4U]; - Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10); - Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11); - 
Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12); - Lib_IntVector_Intrinsics_vec256 a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13); - Lib_IntVector_Intrinsics_vec256 a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14); - Lib_IntVector_Intrinsics_vec256 a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01); - Lib_IntVector_Intrinsics_vec256 a12 = Lib_IntVector_Intrinsics_vec256_mul64(r1, a01); - Lib_IntVector_Intrinsics_vec256 a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01); - Lib_IntVector_Intrinsics_vec256 a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01); - Lib_IntVector_Intrinsics_vec256 a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01); - Lib_IntVector_Intrinsics_vec256 - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a11)); - Lib_IntVector_Intrinsics_vec256 - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a11)); - Lib_IntVector_Intrinsics_vec256 - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a11)); - Lib_IntVector_Intrinsics_vec256 - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a11)); - Lib_IntVector_Intrinsics_vec256 - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r3, a11)); - Lib_IntVector_Intrinsics_vec256 - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a21)); - Lib_IntVector_Intrinsics_vec256 - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a21)); - Lib_IntVector_Intrinsics_vec256 - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a21)); - Lib_IntVector_Intrinsics_vec256 - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a21)); - Lib_IntVector_Intrinsics_vec256 - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a21)); - Lib_IntVector_Intrinsics_vec256 - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a31)); - Lib_IntVector_Intrinsics_vec256 - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a31)); - Lib_IntVector_Intrinsics_vec256 - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a31)); - Lib_IntVector_Intrinsics_vec256 - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a31)); - Lib_IntVector_Intrinsics_vec256 - a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a31)); - Lib_IntVector_Intrinsics_vec256 - a06 = - Lib_IntVector_Intrinsics_vec256_add64(a05, - Lib_IntVector_Intrinsics_vec256_mul64(r51, a41)); - Lib_IntVector_Intrinsics_vec256 - a16 = - Lib_IntVector_Intrinsics_vec256_add64(a15, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a41)); - Lib_IntVector_Intrinsics_vec256 - a26 = - Lib_IntVector_Intrinsics_vec256_add64(a25, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a41)); - Lib_IntVector_Intrinsics_vec256 - a36 = - Lib_IntVector_Intrinsics_vec256_add64(a35, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a41)); - Lib_IntVector_Intrinsics_vec256 - a46 = - Lib_IntVector_Intrinsics_vec256_add64(a45, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a41)); - Lib_IntVector_Intrinsics_vec256 t01 = a06; - Lib_IntVector_Intrinsics_vec256 t11 
= a16; - Lib_IntVector_Intrinsics_vec256 t2 = a26; - Lib_IntVector_Intrinsics_vec256 t3 = a36; - Lib_IntVector_Intrinsics_vec256 t4 = a46; - Lib_IntVector_Intrinsics_vec256 - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26); - Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0); - Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - Lib_IntVector_Intrinsics_vec256 - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - Lib_IntVector_Intrinsics_vec256 - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - Lib_IntVector_Intrinsics_vec256 - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - Lib_IntVector_Intrinsics_vec256 o0 = x02; - Lib_IntVector_Intrinsics_vec256 o1 = x12; - Lib_IntVector_Intrinsics_vec256 o2 = x21; - Lib_IntVector_Intrinsics_vec256 o3 = x32; - Lib_IntVector_Intrinsics_vec256 o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - } - } - } - } - if (rem > (uint32_t)0U) - { - uint8_t *last = t10 + nb0 * (uint32_t)16U; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t tmp[16U] = { 0U }; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - Lib_IntVector_Intrinsics_vec256 f0; - Lib_IntVector_Intrinsics_vec256 f1; - Lib_IntVector_Intrinsics_vec256 f010; - Lib_IntVector_Intrinsics_vec256 f110; - Lib_IntVector_Intrinsics_vec256 f20; - Lib_IntVector_Intrinsics_vec256 f30; - Lib_IntVector_Intrinsics_vec256 f40; - Lib_IntVector_Intrinsics_vec256 f01; - Lib_IntVector_Intrinsics_vec256 f111; - Lib_IntVector_Intrinsics_vec256 f2; - Lib_IntVector_Intrinsics_vec256 f3; - Lib_IntVector_Intrinsics_vec256 f4; - uint64_t b; - Lib_IntVector_Intrinsics_vec256 mask; - Lib_IntVector_Intrinsics_vec256 
fi; - Lib_IntVector_Intrinsics_vec256 *r; - Lib_IntVector_Intrinsics_vec256 *r5; - Lib_IntVector_Intrinsics_vec256 r0; - Lib_IntVector_Intrinsics_vec256 r1; - Lib_IntVector_Intrinsics_vec256 r2; - Lib_IntVector_Intrinsics_vec256 r3; - Lib_IntVector_Intrinsics_vec256 r4; - Lib_IntVector_Intrinsics_vec256 r51; - Lib_IntVector_Intrinsics_vec256 r52; - Lib_IntVector_Intrinsics_vec256 r53; - Lib_IntVector_Intrinsics_vec256 r54; - Lib_IntVector_Intrinsics_vec256 f10; - Lib_IntVector_Intrinsics_vec256 f11; - Lib_IntVector_Intrinsics_vec256 f12; - Lib_IntVector_Intrinsics_vec256 f13; - Lib_IntVector_Intrinsics_vec256 f14; - Lib_IntVector_Intrinsics_vec256 a0; - Lib_IntVector_Intrinsics_vec256 a1; - Lib_IntVector_Intrinsics_vec256 a2; - Lib_IntVector_Intrinsics_vec256 a3; - Lib_IntVector_Intrinsics_vec256 a4; - Lib_IntVector_Intrinsics_vec256 a01; - Lib_IntVector_Intrinsics_vec256 a11; - Lib_IntVector_Intrinsics_vec256 a21; - Lib_IntVector_Intrinsics_vec256 a31; - Lib_IntVector_Intrinsics_vec256 a41; - Lib_IntVector_Intrinsics_vec256 a02; - Lib_IntVector_Intrinsics_vec256 a12; - Lib_IntVector_Intrinsics_vec256 a22; - Lib_IntVector_Intrinsics_vec256 a32; - Lib_IntVector_Intrinsics_vec256 a42; - Lib_IntVector_Intrinsics_vec256 a03; - Lib_IntVector_Intrinsics_vec256 a13; - Lib_IntVector_Intrinsics_vec256 a23; - Lib_IntVector_Intrinsics_vec256 a33; - Lib_IntVector_Intrinsics_vec256 a43; - Lib_IntVector_Intrinsics_vec256 a04; - Lib_IntVector_Intrinsics_vec256 a14; - Lib_IntVector_Intrinsics_vec256 a24; - Lib_IntVector_Intrinsics_vec256 a34; - Lib_IntVector_Intrinsics_vec256 a44; - Lib_IntVector_Intrinsics_vec256 a05; - Lib_IntVector_Intrinsics_vec256 a15; - Lib_IntVector_Intrinsics_vec256 a25; - Lib_IntVector_Intrinsics_vec256 a35; - Lib_IntVector_Intrinsics_vec256 a45; - Lib_IntVector_Intrinsics_vec256 a06; - Lib_IntVector_Intrinsics_vec256 a16; - Lib_IntVector_Intrinsics_vec256 a26; - Lib_IntVector_Intrinsics_vec256 a36; - Lib_IntVector_Intrinsics_vec256 a46; - Lib_IntVector_Intrinsics_vec256 t01; - Lib_IntVector_Intrinsics_vec256 t11; - Lib_IntVector_Intrinsics_vec256 t2; - Lib_IntVector_Intrinsics_vec256 t3; - Lib_IntVector_Intrinsics_vec256 t4; - Lib_IntVector_Intrinsics_vec256 mask26; - Lib_IntVector_Intrinsics_vec256 z0; - Lib_IntVector_Intrinsics_vec256 z1; - Lib_IntVector_Intrinsics_vec256 x0; - Lib_IntVector_Intrinsics_vec256 x3; - Lib_IntVector_Intrinsics_vec256 x1; - Lib_IntVector_Intrinsics_vec256 x4; - Lib_IntVector_Intrinsics_vec256 z01; - Lib_IntVector_Intrinsics_vec256 z11; - Lib_IntVector_Intrinsics_vec256 t; - Lib_IntVector_Intrinsics_vec256 z12; - Lib_IntVector_Intrinsics_vec256 x11; - Lib_IntVector_Intrinsics_vec256 x41; - Lib_IntVector_Intrinsics_vec256 x2; - Lib_IntVector_Intrinsics_vec256 x01; - Lib_IntVector_Intrinsics_vec256 z02; - Lib_IntVector_Intrinsics_vec256 z13; - Lib_IntVector_Intrinsics_vec256 x21; - Lib_IntVector_Intrinsics_vec256 x02; - Lib_IntVector_Intrinsics_vec256 x31; - Lib_IntVector_Intrinsics_vec256 x12; - Lib_IntVector_Intrinsics_vec256 z03; - Lib_IntVector_Intrinsics_vec256 x32; - Lib_IntVector_Intrinsics_vec256 x42; - Lib_IntVector_Intrinsics_vec256 o0; - Lib_IntVector_Intrinsics_vec256 o1; - Lib_IntVector_Intrinsics_vec256 o2; - Lib_IntVector_Intrinsics_vec256 o3; - Lib_IntVector_Intrinsics_vec256 o4; - memcpy(tmp, last, rem * sizeof (uint8_t)); - u0 = load64_le(tmp); - lo = u0; - u = load64_le(tmp + (uint32_t)8U); - hi = u; - f0 = Lib_IntVector_Intrinsics_vec256_load64(lo); - f1 = Lib_IntVector_Intrinsics_vec256_load64(hi); - f010 = - 
Lib_IntVector_Intrinsics_vec256_and(f0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - f110 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)26U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - f20 = - Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, - (uint32_t)52U), - Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)), - (uint32_t)12U)); - f30 = - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U); - f01 = f010; - f111 = f110; - f2 = f20; - f3 = f30; - f4 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f4; - b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U; - mask = Lib_IntVector_Intrinsics_vec256_load64(b); - fi = e[rem * (uint32_t)8U / (uint32_t)26U]; - e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask); - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10); - a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11); - a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12); - a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13); - a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14); - a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01); - a12 = Lib_IntVector_Intrinsics_vec256_mul64(r1, a01); - a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01); - a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01); - a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01); - a03 = - Lib_IntVector_Intrinsics_vec256_add64(a02, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a11)); - a13 = - Lib_IntVector_Intrinsics_vec256_add64(a12, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a11)); - a23 = - Lib_IntVector_Intrinsics_vec256_add64(a22, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a11)); - a33 = - Lib_IntVector_Intrinsics_vec256_add64(a32, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a11)); - a43 = - Lib_IntVector_Intrinsics_vec256_add64(a42, - Lib_IntVector_Intrinsics_vec256_mul64(r3, a11)); - a04 = - Lib_IntVector_Intrinsics_vec256_add64(a03, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a21)); - a14 = - Lib_IntVector_Intrinsics_vec256_add64(a13, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a21)); - a24 = - Lib_IntVector_Intrinsics_vec256_add64(a23, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a21)); - a34 = - Lib_IntVector_Intrinsics_vec256_add64(a33, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a21)); - a44 = - Lib_IntVector_Intrinsics_vec256_add64(a43, - Lib_IntVector_Intrinsics_vec256_mul64(r2, a21)); - a05 = - Lib_IntVector_Intrinsics_vec256_add64(a04, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a31)); - a15 = - Lib_IntVector_Intrinsics_vec256_add64(a14, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a31)); - a25 = - Lib_IntVector_Intrinsics_vec256_add64(a24, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a31)); - a35 = - Lib_IntVector_Intrinsics_vec256_add64(a34, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a31)); - 
a45 = - Lib_IntVector_Intrinsics_vec256_add64(a44, - Lib_IntVector_Intrinsics_vec256_mul64(r1, a31)); - a06 = - Lib_IntVector_Intrinsics_vec256_add64(a05, - Lib_IntVector_Intrinsics_vec256_mul64(r51, a41)); - a16 = - Lib_IntVector_Intrinsics_vec256_add64(a15, - Lib_IntVector_Intrinsics_vec256_mul64(r52, a41)); - a26 = - Lib_IntVector_Intrinsics_vec256_add64(a25, - Lib_IntVector_Intrinsics_vec256_mul64(r53, a41)); - a36 = - Lib_IntVector_Intrinsics_vec256_add64(a35, - Lib_IntVector_Intrinsics_vec256_mul64(r54, a41)); - a46 = - Lib_IntVector_Intrinsics_vec256_add64(a45, - Lib_IntVector_Intrinsics_vec256_mul64(r0, a41)); - t01 = a06; - t11 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U); - z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U); - x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26); - x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26); - x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0); - x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1); - z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U); - z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U); - t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U); - z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t); - x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26); - x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26); - x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01); - x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12); - z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U); - z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U); - x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26); - x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26); - x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02); - x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13); - z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U); - x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26); - x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03); - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - return; - } -} - -void -Hacl_Poly1305_256_poly1305_finish( - uint8_t *tag, - uint8_t *key, - Lib_IntVector_Intrinsics_vec256 *ctx -) -{ - Lib_IntVector_Intrinsics_vec256 *acc = ctx; - uint8_t *ks = key + (uint32_t)16U; - Lib_IntVector_Intrinsics_vec256 f00 = acc[0U]; - Lib_IntVector_Intrinsics_vec256 f13 = acc[1U]; - Lib_IntVector_Intrinsics_vec256 f23 = acc[2U]; - Lib_IntVector_Intrinsics_vec256 f33 = acc[3U]; - Lib_IntVector_Intrinsics_vec256 f40 = acc[4U]; - Lib_IntVector_Intrinsics_vec256 - l0 = Lib_IntVector_Intrinsics_vec256_add64(f00, Lib_IntVector_Intrinsics_vec256_zero); - Lib_IntVector_Intrinsics_vec256 - tmp00 = - Lib_IntVector_Intrinsics_vec256_and(l0, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(f13, c00); - Lib_IntVector_Intrinsics_vec256 - tmp10 = - Lib_IntVector_Intrinsics_vec256_and(l1, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U); - 
Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(f23, c10); - Lib_IntVector_Intrinsics_vec256 - tmp20 = - Lib_IntVector_Intrinsics_vec256_and(l2, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(f33, c20); - Lib_IntVector_Intrinsics_vec256 - tmp30 = - Lib_IntVector_Intrinsics_vec256_and(l3, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l4 = Lib_IntVector_Intrinsics_vec256_add64(f40, c30); - Lib_IntVector_Intrinsics_vec256 - tmp40 = - Lib_IntVector_Intrinsics_vec256_and(l4, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - f010 = - Lib_IntVector_Intrinsics_vec256_add64(tmp00, - Lib_IntVector_Intrinsics_vec256_smul64(c40, (uint64_t)5U)); - Lib_IntVector_Intrinsics_vec256 f110 = tmp10; - Lib_IntVector_Intrinsics_vec256 f210 = tmp20; - Lib_IntVector_Intrinsics_vec256 f310 = tmp30; - Lib_IntVector_Intrinsics_vec256 f410 = tmp40; - Lib_IntVector_Intrinsics_vec256 - l = Lib_IntVector_Intrinsics_vec256_add64(f010, Lib_IntVector_Intrinsics_vec256_zero); - Lib_IntVector_Intrinsics_vec256 - tmp0 = - Lib_IntVector_Intrinsics_vec256_and(l, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l5 = Lib_IntVector_Intrinsics_vec256_add64(f110, c0); - Lib_IntVector_Intrinsics_vec256 - tmp1 = - Lib_IntVector_Intrinsics_vec256_and(l5, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l6 = Lib_IntVector_Intrinsics_vec256_add64(f210, c1); - Lib_IntVector_Intrinsics_vec256 - tmp2 = - Lib_IntVector_Intrinsics_vec256_and(l6, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l7 = Lib_IntVector_Intrinsics_vec256_add64(f310, c2); - Lib_IntVector_Intrinsics_vec256 - tmp3 = - Lib_IntVector_Intrinsics_vec256_and(l7, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 l8 = Lib_IntVector_Intrinsics_vec256_add64(f410, c3); - Lib_IntVector_Intrinsics_vec256 - tmp4 = - Lib_IntVector_Intrinsics_vec256_and(l8, - Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU)); - Lib_IntVector_Intrinsics_vec256 - c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, (uint32_t)26U); - Lib_IntVector_Intrinsics_vec256 - f02 = - Lib_IntVector_Intrinsics_vec256_add64(tmp0, - Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U)); - Lib_IntVector_Intrinsics_vec256 f12 = tmp1; - Lib_IntVector_Intrinsics_vec256 f22 = tmp2; - Lib_IntVector_Intrinsics_vec256 f32 = tmp3; - Lib_IntVector_Intrinsics_vec256 f42 = tmp4; - Lib_IntVector_Intrinsics_vec256 - mh = 
Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU); - Lib_IntVector_Intrinsics_vec256 - ml = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffffbU); - Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_eq64(f42, mh); - Lib_IntVector_Intrinsics_vec256 - mask1 = - Lib_IntVector_Intrinsics_vec256_and(mask, - Lib_IntVector_Intrinsics_vec256_eq64(f32, mh)); - Lib_IntVector_Intrinsics_vec256 - mask2 = - Lib_IntVector_Intrinsics_vec256_and(mask1, - Lib_IntVector_Intrinsics_vec256_eq64(f22, mh)); - Lib_IntVector_Intrinsics_vec256 - mask3 = - Lib_IntVector_Intrinsics_vec256_and(mask2, - Lib_IntVector_Intrinsics_vec256_eq64(f12, mh)); - Lib_IntVector_Intrinsics_vec256 - mask4 = - Lib_IntVector_Intrinsics_vec256_and(mask3, - Lib_IntVector_Intrinsics_vec256_lognot(Lib_IntVector_Intrinsics_vec256_gt64(ml, f02))); - Lib_IntVector_Intrinsics_vec256 ph = Lib_IntVector_Intrinsics_vec256_and(mask4, mh); - Lib_IntVector_Intrinsics_vec256 pl = Lib_IntVector_Intrinsics_vec256_and(mask4, ml); - Lib_IntVector_Intrinsics_vec256 o0 = Lib_IntVector_Intrinsics_vec256_sub64(f02, pl); - Lib_IntVector_Intrinsics_vec256 o1 = Lib_IntVector_Intrinsics_vec256_sub64(f12, ph); - Lib_IntVector_Intrinsics_vec256 o2 = Lib_IntVector_Intrinsics_vec256_sub64(f22, ph); - Lib_IntVector_Intrinsics_vec256 o3 = Lib_IntVector_Intrinsics_vec256_sub64(f32, ph); - Lib_IntVector_Intrinsics_vec256 o4 = Lib_IntVector_Intrinsics_vec256_sub64(f42, ph); - Lib_IntVector_Intrinsics_vec256 f011 = o0; - Lib_IntVector_Intrinsics_vec256 f111 = o1; - Lib_IntVector_Intrinsics_vec256 f211 = o2; - Lib_IntVector_Intrinsics_vec256 f311 = o3; - Lib_IntVector_Intrinsics_vec256 f411 = o4; - Lib_IntVector_Intrinsics_vec256 f0; - Lib_IntVector_Intrinsics_vec256 f1; - Lib_IntVector_Intrinsics_vec256 f2; - Lib_IntVector_Intrinsics_vec256 f3; - Lib_IntVector_Intrinsics_vec256 f4; - uint64_t f01; - uint64_t f112; - uint64_t f212; - uint64_t f312; - uint64_t f41; - uint64_t lo0; - uint64_t hi0; - uint64_t f10; - uint64_t f11; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - uint64_t f20; - uint64_t f21; - uint64_t r0; - uint64_t r1; - uint64_t c; - uint64_t r11; - uint64_t f30; - uint64_t f31; - acc[0U] = f011; - acc[1U] = f111; - acc[2U] = f211; - acc[3U] = f311; - acc[4U] = f411; - f0 = acc[0U]; - f1 = acc[1U]; - f2 = acc[2U]; - f3 = acc[3U]; - f4 = acc[4U]; - f01 = Lib_IntVector_Intrinsics_vec256_extract64(f0, (uint32_t)0U); - f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, (uint32_t)0U); - f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, (uint32_t)0U); - f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, (uint32_t)0U); - f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, (uint32_t)0U); - lo0 = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U; - hi0 = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U; - f10 = lo0; - f11 = hi0; - u0 = load64_le(ks); - lo = u0; - u = load64_le(ks + (uint32_t)8U); - hi = u; - f20 = lo; - f21 = hi; - r0 = f10 + f20; - r1 = f11 + f21; - c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U; - r11 = r1 + c; - f30 = r0; - f31 = r11; - store64_le(tag, f30); - store64_le(tag + (uint32_t)8U, f31); -} - -void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U }; - Hacl_Poly1305_256_poly1305_init(ctx, key); - Hacl_Poly1305_256_poly1305_update(ctx, len, text); - Hacl_Poly1305_256_poly1305_finish(tag, key, ctx); -} - diff 
--git a/dist/c89-compatible/Hacl_Poly1305_256.h b/dist/c89-compatible/Hacl_Poly1305_256.h deleted file mode 100644 index 1f50d20cc5..0000000000 --- a/dist/c89-compatible/Hacl_Poly1305_256.h +++ /dev/null @@ -1,68 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Poly1305_256_H -#define __Hacl_Poly1305_256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -#include "libintvector.h" -typedef Lib_IntVector_Intrinsics_vec256 *Hacl_Poly1305_256_poly1305_ctx; - -void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key); - -void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text); - -void -Hacl_Poly1305_256_poly1305_update( - Lib_IntVector_Intrinsics_vec256 *ctx, - uint32_t len, - uint8_t *text -); - -void -Hacl_Poly1305_256_poly1305_finish( - uint8_t *tag, - uint8_t *key, - Lib_IntVector_Intrinsics_vec256 *ctx -); - -void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Poly1305_256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Poly1305_32.c b/dist/c89-compatible/Hacl_Poly1305_32.c deleted file mode 100644 index bdea6e2267..0000000000 --- a/dist/c89-compatible/Hacl_Poly1305_32.c +++ /dev/null @@ -1,827 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Poly1305_32.h" - - - -void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key) -{ - uint64_t *acc = ctx; - uint64_t *pre = ctx + (uint32_t)5U; - uint8_t *kr = key; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - uint64_t mask0; - uint64_t mask1; - uint64_t lo1; - uint64_t hi1; - uint64_t *r; - uint64_t *r5; - uint64_t *rn; - uint64_t *rn_5; - uint64_t r_vec0; - uint64_t r_vec1; - uint64_t f00; - uint64_t f10; - uint64_t f25; - uint64_t f30; - uint64_t f40; - uint64_t f0; - uint64_t f1; - uint64_t f2; - uint64_t f3; - uint64_t f4; - uint64_t f20; - uint64_t f21; - uint64_t f22; - uint64_t f23; - uint64_t f24; - acc[0U] = (uint64_t)0U; - acc[1U] = (uint64_t)0U; - acc[2U] = (uint64_t)0U; - acc[3U] = (uint64_t)0U; - acc[4U] = (uint64_t)0U; - u0 = load64_le(kr); - lo = u0; - u = load64_le(kr + (uint32_t)8U); - hi = u; - mask0 = (uint64_t)0x0ffffffc0fffffffU; - mask1 = (uint64_t)0x0ffffffc0ffffffcU; - lo1 = lo & mask0; - hi1 = hi & mask1; - r = pre; - r5 = pre + (uint32_t)5U; - rn = pre + (uint32_t)10U; - rn_5 = pre + (uint32_t)15U; - r_vec0 = lo1; - r_vec1 = hi1; - f00 = r_vec0 & (uint64_t)0x3ffffffU; - f10 = r_vec0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - f25 = r_vec0 >> (uint32_t)52U | (r_vec1 & (uint64_t)0x3fffU) << (uint32_t)12U; - f30 = r_vec1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - f40 = r_vec1 >> (uint32_t)40U; - f0 = f00; - f1 = f10; - f2 = f25; - f3 = f30; - f4 = f40; - r[0U] = f0; - r[1U] = f1; - r[2U] = f2; - r[3U] = f3; - r[4U] = f4; - f20 = r[0U]; - f21 = r[1U]; - f22 = r[2U]; - f23 = r[3U]; - f24 = r[4U]; - r5[0U] = f20 * (uint64_t)5U; - r5[1U] = f21 * (uint64_t)5U; - r5[2U] = f22 * (uint64_t)5U; - r5[3U] = f23 * (uint64_t)5U; - r5[4U] = f24 * (uint64_t)5U; - rn[0U] = r[0U]; - rn[1U] = r[1U]; - rn[2U] = r[2U]; - rn[3U] = r[3U]; - rn[4U] = r[4U]; - rn_5[0U] = r5[0U]; - rn_5[1U] = r5[1U]; - rn_5[2U] = r5[2U]; - rn_5[3U] = r5[3U]; - rn_5[4U] = r5[4U]; -} - -void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text) -{ - uint64_t *pre = ctx + (uint32_t)5U; - uint64_t *acc = ctx; - uint64_t e[5U] = { 0U }; - uint64_t u0 = load64_le(text); - uint64_t lo = u0; - uint64_t u = load64_le(text + (uint32_t)8U); - uint64_t hi = u; - uint64_t f0 = lo; - uint64_t f1 = hi; - uint64_t f010 = f0 & (uint64_t)0x3ffffffU; - uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U; - uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - uint64_t f40 = f1 >> (uint32_t)40U; - uint64_t f01 = f010; - uint64_t f111 = f110; - uint64_t f2 = f20; - uint64_t f3 = f30; - uint64_t f41 = f40; - uint64_t b; - uint64_t mask; - uint64_t f4; - uint64_t *r; - uint64_t *r5; - uint64_t r0; - uint64_t r1; - uint64_t r2; - uint64_t r3; - uint64_t r4; - uint64_t r51; - uint64_t r52; - uint64_t r53; - uint64_t r54; - uint64_t f10; - uint64_t f11; - uint64_t f12; - uint64_t f13; - uint64_t f14; - uint64_t a0; - uint64_t a1; - uint64_t a2; - uint64_t a3; - uint64_t a4; - uint64_t a01; - uint64_t a11; - uint64_t a21; - uint64_t a31; - uint64_t a41; - uint64_t a02; - uint64_t a12; - uint64_t a22; - uint64_t a32; - uint64_t a42; - uint64_t a03; - uint64_t a13; - uint64_t a23; - uint64_t a33; - uint64_t a43; - 
uint64_t a04; - uint64_t a14; - uint64_t a24; - uint64_t a34; - uint64_t a44; - uint64_t a05; - uint64_t a15; - uint64_t a25; - uint64_t a35; - uint64_t a45; - uint64_t a06; - uint64_t a16; - uint64_t a26; - uint64_t a36; - uint64_t a46; - uint64_t t0; - uint64_t t1; - uint64_t t2; - uint64_t t3; - uint64_t t4; - uint64_t mask26; - uint64_t z0; - uint64_t z1; - uint64_t x0; - uint64_t x3; - uint64_t x1; - uint64_t x4; - uint64_t z01; - uint64_t z11; - uint64_t t; - uint64_t z12; - uint64_t x11; - uint64_t x41; - uint64_t x2; - uint64_t x01; - uint64_t z02; - uint64_t z13; - uint64_t x21; - uint64_t x02; - uint64_t x31; - uint64_t x12; - uint64_t z03; - uint64_t x32; - uint64_t x42; - uint64_t o0; - uint64_t o1; - uint64_t o2; - uint64_t o3; - uint64_t o4; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - b = (uint64_t)0x1000000U; - mask = b; - f4 = e[4U]; - e[4U] = f4 | mask; - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = a0 + f10; - a11 = a1 + f11; - a21 = a2 + f12; - a31 = a3 + f13; - a41 = a4 + f14; - a02 = r0 * a01; - a12 = r1 * a01; - a22 = r2 * a01; - a32 = r3 * a01; - a42 = r4 * a01; - a03 = a02 + r54 * a11; - a13 = a12 + r0 * a11; - a23 = a22 + r1 * a11; - a33 = a32 + r2 * a11; - a43 = a42 + r3 * a11; - a04 = a03 + r53 * a21; - a14 = a13 + r54 * a21; - a24 = a23 + r0 * a21; - a34 = a33 + r1 * a21; - a44 = a43 + r2 * a21; - a05 = a04 + r52 * a31; - a15 = a14 + r53 * a31; - a25 = a24 + r54 * a31; - a35 = a34 + r0 * a31; - a45 = a44 + r1 * a31; - a06 = a05 + r51 * a41; - a16 = a15 + r52 * a41; - a26 = a25 + r53 * a41; - a36 = a35 + r54 * a41; - a46 = a45 + r0 * a41; - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = (uint64_t)0x3ffffffU; - z0 = t0 >> (uint32_t)26U; - z1 = t3 >> (uint32_t)26U; - x0 = t0 & mask26; - x3 = t3 & mask26; - x1 = t1 + z0; - x4 = t4 + z1; - z01 = x1 >> (uint32_t)26U; - z11 = x4 >> (uint32_t)26U; - t = z11 << (uint32_t)2U; - z12 = z11 + t; - x11 = x1 & mask26; - x41 = x4 & mask26; - x2 = t2 + z01; - x01 = x0 + z12; - z02 = x2 >> (uint32_t)26U; - z13 = x01 >> (uint32_t)26U; - x21 = x2 & mask26; - x02 = x01 & mask26; - x31 = x3 + z02; - x12 = x11 + z13; - z03 = x31 >> (uint32_t)26U; - x32 = x31 & mask26; - x42 = x41 + z03; - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; -} - -void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text) -{ - uint64_t *pre = ctx + (uint32_t)5U; - uint64_t *acc = ctx; - uint32_t nb = len / (uint32_t)16U; - uint32_t rem = len % (uint32_t)16U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *block = text + i * (uint32_t)16U; - uint64_t e[5U] = { 0U }; - uint64_t u0 = load64_le(block); - uint64_t lo = u0; - uint64_t u = load64_le(block + (uint32_t)8U); - uint64_t hi = u; - uint64_t f0 = lo; - uint64_t f1 = hi; - uint64_t f010 = f0 & (uint64_t)0x3ffffffU; - uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U; - uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - uint64_t f40 = f1 >> (uint32_t)40U; - uint64_t f01 = f010; - uint64_t f111 = f110; - uint64_t f2 = f20; - uint64_t f3 = 
f30; - uint64_t f41 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f41; - { - uint64_t b = (uint64_t)0x1000000U; - uint64_t mask = b; - uint64_t f4 = e[4U]; - e[4U] = f4 | mask; - { - uint64_t *r = pre; - uint64_t *r5 = pre + (uint32_t)5U; - uint64_t r0 = r[0U]; - uint64_t r1 = r[1U]; - uint64_t r2 = r[2U]; - uint64_t r3 = r[3U]; - uint64_t r4 = r[4U]; - uint64_t r51 = r5[1U]; - uint64_t r52 = r5[2U]; - uint64_t r53 = r5[3U]; - uint64_t r54 = r5[4U]; - uint64_t f10 = e[0U]; - uint64_t f11 = e[1U]; - uint64_t f12 = e[2U]; - uint64_t f13 = e[3U]; - uint64_t f14 = e[4U]; - uint64_t a0 = acc[0U]; - uint64_t a1 = acc[1U]; - uint64_t a2 = acc[2U]; - uint64_t a3 = acc[3U]; - uint64_t a4 = acc[4U]; - uint64_t a01 = a0 + f10; - uint64_t a11 = a1 + f11; - uint64_t a21 = a2 + f12; - uint64_t a31 = a3 + f13; - uint64_t a41 = a4 + f14; - uint64_t a02 = r0 * a01; - uint64_t a12 = r1 * a01; - uint64_t a22 = r2 * a01; - uint64_t a32 = r3 * a01; - uint64_t a42 = r4 * a01; - uint64_t a03 = a02 + r54 * a11; - uint64_t a13 = a12 + r0 * a11; - uint64_t a23 = a22 + r1 * a11; - uint64_t a33 = a32 + r2 * a11; - uint64_t a43 = a42 + r3 * a11; - uint64_t a04 = a03 + r53 * a21; - uint64_t a14 = a13 + r54 * a21; - uint64_t a24 = a23 + r0 * a21; - uint64_t a34 = a33 + r1 * a21; - uint64_t a44 = a43 + r2 * a21; - uint64_t a05 = a04 + r52 * a31; - uint64_t a15 = a14 + r53 * a31; - uint64_t a25 = a24 + r54 * a31; - uint64_t a35 = a34 + r0 * a31; - uint64_t a45 = a44 + r1 * a31; - uint64_t a06 = a05 + r51 * a41; - uint64_t a16 = a15 + r52 * a41; - uint64_t a26 = a25 + r53 * a41; - uint64_t a36 = a35 + r54 * a41; - uint64_t a46 = a45 + r0 * a41; - uint64_t t0 = a06; - uint64_t t1 = a16; - uint64_t t2 = a26; - uint64_t t3 = a36; - uint64_t t4 = a46; - uint64_t mask26 = (uint64_t)0x3ffffffU; - uint64_t z0 = t0 >> (uint32_t)26U; - uint64_t z1 = t3 >> (uint32_t)26U; - uint64_t x0 = t0 & mask26; - uint64_t x3 = t3 & mask26; - uint64_t x1 = t1 + z0; - uint64_t x4 = t4 + z1; - uint64_t z01 = x1 >> (uint32_t)26U; - uint64_t z11 = x4 >> (uint32_t)26U; - uint64_t t = z11 << (uint32_t)2U; - uint64_t z12 = z11 + t; - uint64_t x11 = x1 & mask26; - uint64_t x41 = x4 & mask26; - uint64_t x2 = t2 + z01; - uint64_t x01 = x0 + z12; - uint64_t z02 = x2 >> (uint32_t)26U; - uint64_t z13 = x01 >> (uint32_t)26U; - uint64_t x21 = x2 & mask26; - uint64_t x02 = x01 & mask26; - uint64_t x31 = x3 + z02; - uint64_t x12 = x11 + z13; - uint64_t z03 = x31 >> (uint32_t)26U; - uint64_t x32 = x31 & mask26; - uint64_t x42 = x41 + z03; - uint64_t o0 = x02; - uint64_t o1 = x12; - uint64_t o2 = x21; - uint64_t o3 = x32; - uint64_t o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - } - } - } - } - if (rem > (uint32_t)0U) - { - uint8_t *last = text + nb * (uint32_t)16U; - uint64_t e[5U] = { 0U }; - uint8_t tmp[16U] = { 0U }; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - uint64_t f0; - uint64_t f1; - uint64_t f010; - uint64_t f110; - uint64_t f20; - uint64_t f30; - uint64_t f40; - uint64_t f01; - uint64_t f111; - uint64_t f2; - uint64_t f3; - uint64_t f4; - uint64_t b; - uint64_t mask; - uint64_t fi; - uint64_t *r; - uint64_t *r5; - uint64_t r0; - uint64_t r1; - uint64_t r2; - uint64_t r3; - uint64_t r4; - uint64_t r51; - uint64_t r52; - uint64_t r53; - uint64_t r54; - uint64_t f10; - uint64_t f11; - uint64_t f12; - uint64_t f13; - uint64_t f14; - uint64_t a0; - uint64_t a1; - uint64_t a2; - uint64_t a3; - uint64_t a4; - uint64_t a01; - uint64_t a11; - uint64_t a21; - uint64_t 
a31; - uint64_t a41; - uint64_t a02; - uint64_t a12; - uint64_t a22; - uint64_t a32; - uint64_t a42; - uint64_t a03; - uint64_t a13; - uint64_t a23; - uint64_t a33; - uint64_t a43; - uint64_t a04; - uint64_t a14; - uint64_t a24; - uint64_t a34; - uint64_t a44; - uint64_t a05; - uint64_t a15; - uint64_t a25; - uint64_t a35; - uint64_t a45; - uint64_t a06; - uint64_t a16; - uint64_t a26; - uint64_t a36; - uint64_t a46; - uint64_t t0; - uint64_t t1; - uint64_t t2; - uint64_t t3; - uint64_t t4; - uint64_t mask26; - uint64_t z0; - uint64_t z1; - uint64_t x0; - uint64_t x3; - uint64_t x1; - uint64_t x4; - uint64_t z01; - uint64_t z11; - uint64_t t; - uint64_t z12; - uint64_t x11; - uint64_t x41; - uint64_t x2; - uint64_t x01; - uint64_t z02; - uint64_t z13; - uint64_t x21; - uint64_t x02; - uint64_t x31; - uint64_t x12; - uint64_t z03; - uint64_t x32; - uint64_t x42; - uint64_t o0; - uint64_t o1; - uint64_t o2; - uint64_t o3; - uint64_t o4; - memcpy(tmp, last, rem * sizeof (uint8_t)); - u0 = load64_le(tmp); - lo = u0; - u = load64_le(tmp + (uint32_t)8U); - hi = u; - f0 = lo; - f1 = hi; - f010 = f0 & (uint64_t)0x3ffffffU; - f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU; - f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U; - f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU; - f40 = f1 >> (uint32_t)40U; - f01 = f010; - f111 = f110; - f2 = f20; - f3 = f30; - f4 = f40; - e[0U] = f01; - e[1U] = f111; - e[2U] = f2; - e[3U] = f3; - e[4U] = f4; - b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U; - mask = b; - fi = e[rem * (uint32_t)8U / (uint32_t)26U]; - e[rem * (uint32_t)8U / (uint32_t)26U] = fi | mask; - r = pre; - r5 = pre + (uint32_t)5U; - r0 = r[0U]; - r1 = r[1U]; - r2 = r[2U]; - r3 = r[3U]; - r4 = r[4U]; - r51 = r5[1U]; - r52 = r5[2U]; - r53 = r5[3U]; - r54 = r5[4U]; - f10 = e[0U]; - f11 = e[1U]; - f12 = e[2U]; - f13 = e[3U]; - f14 = e[4U]; - a0 = acc[0U]; - a1 = acc[1U]; - a2 = acc[2U]; - a3 = acc[3U]; - a4 = acc[4U]; - a01 = a0 + f10; - a11 = a1 + f11; - a21 = a2 + f12; - a31 = a3 + f13; - a41 = a4 + f14; - a02 = r0 * a01; - a12 = r1 * a01; - a22 = r2 * a01; - a32 = r3 * a01; - a42 = r4 * a01; - a03 = a02 + r54 * a11; - a13 = a12 + r0 * a11; - a23 = a22 + r1 * a11; - a33 = a32 + r2 * a11; - a43 = a42 + r3 * a11; - a04 = a03 + r53 * a21; - a14 = a13 + r54 * a21; - a24 = a23 + r0 * a21; - a34 = a33 + r1 * a21; - a44 = a43 + r2 * a21; - a05 = a04 + r52 * a31; - a15 = a14 + r53 * a31; - a25 = a24 + r54 * a31; - a35 = a34 + r0 * a31; - a45 = a44 + r1 * a31; - a06 = a05 + r51 * a41; - a16 = a15 + r52 * a41; - a26 = a25 + r53 * a41; - a36 = a35 + r54 * a41; - a46 = a45 + r0 * a41; - t0 = a06; - t1 = a16; - t2 = a26; - t3 = a36; - t4 = a46; - mask26 = (uint64_t)0x3ffffffU; - z0 = t0 >> (uint32_t)26U; - z1 = t3 >> (uint32_t)26U; - x0 = t0 & mask26; - x3 = t3 & mask26; - x1 = t1 + z0; - x4 = t4 + z1; - z01 = x1 >> (uint32_t)26U; - z11 = x4 >> (uint32_t)26U; - t = z11 << (uint32_t)2U; - z12 = z11 + t; - x11 = x1 & mask26; - x41 = x4 & mask26; - x2 = t2 + z01; - x01 = x0 + z12; - z02 = x2 >> (uint32_t)26U; - z13 = x01 >> (uint32_t)26U; - x21 = x2 & mask26; - x02 = x01 & mask26; - x31 = x3 + z02; - x12 = x11 + z13; - z03 = x31 >> (uint32_t)26U; - x32 = x31 & mask26; - x42 = x41 + z03; - o0 = x02; - o1 = x12; - o2 = x21; - o3 = x32; - o4 = x42; - acc[0U] = o0; - acc[1U] = o1; - acc[2U] = o2; - acc[3U] = o3; - acc[4U] = o4; - return; - } -} - -void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx) -{ - uint64_t *acc = ctx; - uint8_t *ks = key + 
(uint32_t)16U; - uint64_t f00 = acc[0U]; - uint64_t f13 = acc[1U]; - uint64_t f23 = acc[2U]; - uint64_t f33 = acc[3U]; - uint64_t f40 = acc[4U]; - uint64_t l0 = f00 + (uint64_t)0U; - uint64_t tmp00 = l0 & (uint64_t)0x3ffffffU; - uint64_t c00 = l0 >> (uint32_t)26U; - uint64_t l1 = f13 + c00; - uint64_t tmp10 = l1 & (uint64_t)0x3ffffffU; - uint64_t c10 = l1 >> (uint32_t)26U; - uint64_t l2 = f23 + c10; - uint64_t tmp20 = l2 & (uint64_t)0x3ffffffU; - uint64_t c20 = l2 >> (uint32_t)26U; - uint64_t l3 = f33 + c20; - uint64_t tmp30 = l3 & (uint64_t)0x3ffffffU; - uint64_t c30 = l3 >> (uint32_t)26U; - uint64_t l4 = f40 + c30; - uint64_t tmp40 = l4 & (uint64_t)0x3ffffffU; - uint64_t c40 = l4 >> (uint32_t)26U; - uint64_t f010 = tmp00 + c40 * (uint64_t)5U; - uint64_t f110 = tmp10; - uint64_t f210 = tmp20; - uint64_t f310 = tmp30; - uint64_t f410 = tmp40; - uint64_t l = f010 + (uint64_t)0U; - uint64_t tmp0 = l & (uint64_t)0x3ffffffU; - uint64_t c0 = l >> (uint32_t)26U; - uint64_t l5 = f110 + c0; - uint64_t tmp1 = l5 & (uint64_t)0x3ffffffU; - uint64_t c1 = l5 >> (uint32_t)26U; - uint64_t l6 = f210 + c1; - uint64_t tmp2 = l6 & (uint64_t)0x3ffffffU; - uint64_t c2 = l6 >> (uint32_t)26U; - uint64_t l7 = f310 + c2; - uint64_t tmp3 = l7 & (uint64_t)0x3ffffffU; - uint64_t c3 = l7 >> (uint32_t)26U; - uint64_t l8 = f410 + c3; - uint64_t tmp4 = l8 & (uint64_t)0x3ffffffU; - uint64_t c4 = l8 >> (uint32_t)26U; - uint64_t f02 = tmp0 + c4 * (uint64_t)5U; - uint64_t f12 = tmp1; - uint64_t f22 = tmp2; - uint64_t f32 = tmp3; - uint64_t f42 = tmp4; - uint64_t mh = (uint64_t)0x3ffffffU; - uint64_t ml = (uint64_t)0x3fffffbU; - uint64_t mask = FStar_UInt64_eq_mask(f42, mh); - uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh); - uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh); - uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f12, mh); - uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f02, ml); - uint64_t ph = mask4 & mh; - uint64_t pl = mask4 & ml; - uint64_t o0 = f02 - pl; - uint64_t o1 = f12 - ph; - uint64_t o2 = f22 - ph; - uint64_t o3 = f32 - ph; - uint64_t o4 = f42 - ph; - uint64_t f011 = o0; - uint64_t f111 = o1; - uint64_t f211 = o2; - uint64_t f311 = o3; - uint64_t f411 = o4; - uint64_t f0; - uint64_t f1; - uint64_t f2; - uint64_t f3; - uint64_t f4; - uint64_t f01; - uint64_t f112; - uint64_t f212; - uint64_t f312; - uint64_t f41; - uint64_t lo0; - uint64_t hi0; - uint64_t f10; - uint64_t f11; - uint64_t u0; - uint64_t lo; - uint64_t u; - uint64_t hi; - uint64_t f20; - uint64_t f21; - uint64_t r0; - uint64_t r1; - uint64_t c; - uint64_t r11; - uint64_t f30; - uint64_t f31; - acc[0U] = f011; - acc[1U] = f111; - acc[2U] = f211; - acc[3U] = f311; - acc[4U] = f411; - f0 = acc[0U]; - f1 = acc[1U]; - f2 = acc[2U]; - f3 = acc[3U]; - f4 = acc[4U]; - f01 = f0; - f112 = f1; - f212 = f2; - f312 = f3; - f41 = f4; - lo0 = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U; - hi0 = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U; - f10 = lo0; - f11 = hi0; - u0 = load64_le(ks); - lo = u0; - u = load64_le(ks + (uint32_t)8U); - hi = u; - f20 = lo; - f21 = hi; - r0 = f10 + f20; - r1 = f11 + f21; - c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U; - r11 = r1 + c; - f30 = r0; - f31 = r11; - store64_le(tag, f30); - store64_le(tag + (uint32_t)8U, f31); -} - -void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key) -{ - uint64_t ctx[25U] = { 0U }; - Hacl_Poly1305_32_poly1305_init(ctx, key); - Hacl_Poly1305_32_poly1305_update(ctx, len, text); - 
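/* Editorial note, not part of the deleted file: poly1305_mac below is the
 * one-shot wrapper over the three-phase API declared in Hacl_Poly1305_32.h.
 * poly1305_init loads the clamped key r and the pad s into the 25-limb
 * uint64_t context, poly1305_update absorbs the message in 16-byte blocks
 * (padding the final partial block), and poly1305_finish performs the final
 * reduction mod 2^130 - 5 and adds the pad to produce the 16-byte tag.
 * A minimal caller sketch, assuming hypothetical inputs `msg`, `msg_len`
 * and a 32-byte one-time key `key32`:
 *
 *   uint8_t tag[16U] = { 0U };
 *   Hacl_Poly1305_32_poly1305_mac(tag, msg_len, msg, key32);
 */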
Hacl_Poly1305_32_poly1305_finish(tag, key, ctx); -} - diff --git a/dist/c89-compatible/Hacl_Poly1305_32.h b/dist/c89-compatible/Hacl_Poly1305_32.h deleted file mode 100644 index 6bc98a1144..0000000000 --- a/dist/c89-compatible/Hacl_Poly1305_32.h +++ /dev/null @@ -1,57 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Poly1305_32_H -#define __Hacl_Poly1305_32_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -typedef uint64_t *Hacl_Poly1305_32_poly1305_ctx; - -void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key); - -void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text); - -void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text); - -void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx); - -void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Poly1305_32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_RSAPSS.c b/dist/c89-compatible/Hacl_RSAPSS.c deleted file mode 100644 index 50676d3ddf..0000000000 --- a/dist/c89-compatible/Hacl_RSAPSS.c +++ /dev/null @@ -1,1029 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_RSAPSS.h" - -#include "internal/Hacl_Bignum.h" - -static inline uint32_t hash_len(Spec_Hash_Definitions_hash_alg a) -{ - switch (a) - { - case Spec_Hash_Definitions_MD5: - { - return (uint32_t)16U; - } - case Spec_Hash_Definitions_SHA1: - { - return (uint32_t)20U; - } - case Spec_Hash_Definitions_SHA2_224: - { - return (uint32_t)28U; - } - case Spec_Hash_Definitions_SHA2_256: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_SHA2_384: - { - return (uint32_t)48U; - } - case Spec_Hash_Definitions_SHA2_512: - { - return (uint32_t)64U; - } - case Spec_Hash_Definitions_SHA3_256: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_Blake2S: - { - return (uint32_t)32U; - } - case Spec_Hash_Definitions_Blake2B: - { - return (uint32_t)64U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -static inline void -hash(Spec_Hash_Definitions_hash_alg a, uint8_t *mHash, uint32_t msgLen, uint8_t *msg) -{ - switch (a) - { - case Spec_Hash_Definitions_SHA2_256: - { - Hacl_Hash_SHA2_hash_256(msg, msgLen, mHash); - break; - } - case Spec_Hash_Definitions_SHA2_384: - { - Hacl_Hash_SHA2_hash_384(msg, msgLen, mHash); - break; - } - case Spec_Hash_Definitions_SHA2_512: - { - Hacl_Hash_SHA2_hash_512(msg, msgLen, mHash); - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -static inline void -mgf_hash( - Spec_Hash_Definitions_hash_alg a, - uint32_t len, - uint8_t *mgfseed, - uint32_t maskLen, - uint8_t *res -) -{ - KRML_CHECK_SIZE(sizeof (uint8_t), len + (uint32_t)4U); - { - uint8_t mgfseed_counter[len + (uint32_t)4U]; - memset(mgfseed_counter, 0U, (len + (uint32_t)4U) * sizeof (uint8_t)); - { - uint32_t hLen; - uint32_t n; - uint32_t accLen; - memcpy(mgfseed_counter, mgfseed, len * sizeof (uint8_t)); - hLen = hash_len(a); - n = (maskLen - (uint32_t)1U) / hLen + (uint32_t)1U; - accLen = n * hLen; - KRML_CHECK_SIZE(sizeof (uint8_t), accLen); - { - uint8_t acc[accLen]; - memset(acc, 0U, accLen * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < n; i++) - { - uint8_t *acc_i = acc + i * hLen; - uint8_t *c = mgfseed_counter + len; - c[0U] = (uint8_t)(i >> (uint32_t)24U); - c[1U] = (uint8_t)(i >> (uint32_t)16U); - c[2U] = (uint8_t)(i >> (uint32_t)8U); - c[3U] = (uint8_t)i; - hash(a, acc_i, len + (uint32_t)4U, mgfseed_counter); - } - } - memcpy(res, acc, maskLen * sizeof (uint8_t)); - } - } - } -} - -static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b) -{ - uint32_t bLen = (bs - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - if (bs == (uint32_t)64U * bLen) - { - return (uint64_t)0xFFFFFFFFFFFFFFFFU; - } - KRML_CHECK_SIZE(sizeof (uint64_t), bLen); - { - uint64_t b2[bLen]; - memset(b2, 0U, bLen * sizeof (uint64_t)); - { - uint32_t i0 = bs / (uint32_t)64U; - uint32_t j = bs % (uint32_t)64U; - b2[i0] = b2[i0] | (uint64_t)1U << j; - { - uint64_t acc = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < bLen; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt 
& (uint64_t)0U))); - } - } - { - uint64_t res = acc; - return res; - } - } - } - } -} - -static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n) -{ - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint64_t bits0 = n[0U] & (uint64_t)1U; - uint64_t m0 = (uint64_t)0U - bits0; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t b2[nLen]; - memset(b2, 0U, nLen * sizeof (uint64_t)); - { - uint32_t i = (modBits - (uint32_t)1U) / (uint32_t)64U; - uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U; - b2[i] = b2[i] | (uint64_t)1U << j; - { - uint64_t acc = (uint64_t)0U; - uint64_t res; - uint64_t m1; - uint64_t m2; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < nLen; i0++) - { - uint64_t beq = FStar_UInt64_eq_mask(b2[i0], n[i0]); - uint64_t blt = ~FStar_UInt64_gte_mask(b2[i0], n[i0]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - res = acc; - m1 = res; - m2 = check_num_bits_u64(modBits, n); - return m0 & (m1 & m2); - } - } - } -} - -static inline uint64_t check_exponent_u64(uint32_t eBits, uint64_t *e) -{ - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), eLen); - { - uint64_t bn_zero[eLen]; - memset(bn_zero, 0U, eLen * sizeof (uint64_t)); - { - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - uint64_t mask1; - uint64_t res; - uint64_t m0; - uint64_t m1; - { - uint32_t i; - for (i = (uint32_t)0U; i < eLen; i++) - { - uint64_t uu____0 = FStar_UInt64_eq_mask(e[i], bn_zero[i]); - mask = uu____0 & mask; - } - } - mask1 = mask; - res = mask1; - m0 = res; - m1 = check_num_bits_u64(eBits, e); - return ~m0 & m1; - } - } -} - -static inline void -pss_encode( - Spec_Hash_Definitions_hash_alg a, - uint32_t saltLen, - uint8_t *salt, - uint32_t msgLen, - uint8_t *msg, - uint32_t emBits, - uint8_t *em -) -{ - uint32_t hLen = hash_len(a); - KRML_CHECK_SIZE(sizeof (uint8_t), hLen); - { - uint8_t m1Hash[hLen]; - memset(m1Hash, 0U, hLen * sizeof (uint8_t)); - { - uint32_t m1Len = (uint32_t)8U + hLen + saltLen; - KRML_CHECK_SIZE(sizeof (uint8_t), m1Len); - { - uint8_t m1[m1Len]; - memset(m1, 0U, m1Len * sizeof (uint8_t)); - { - uint32_t emLen; - uint32_t dbLen; - hash(a, m1 + (uint32_t)8U, msgLen, msg); - memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t)); - hash(a, m1Hash, m1Len, m1); - emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - dbLen = emLen - hLen - (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint8_t), dbLen); - { - uint8_t db[dbLen]; - memset(db, 0U, dbLen * sizeof (uint8_t)); - { - uint32_t last_before_salt = dbLen - saltLen - (uint32_t)1U; - db[last_before_salt] = (uint8_t)1U; - memcpy(db + last_before_salt + (uint32_t)1U, salt, saltLen * sizeof (uint8_t)); - KRML_CHECK_SIZE(sizeof (uint8_t), dbLen); - { - uint8_t dbMask[dbLen]; - memset(dbMask, 0U, dbLen * sizeof (uint8_t)); - { - uint32_t msBits; - mgf_hash(a, hLen, m1Hash, dbLen, dbMask); - { - uint32_t i; - for (i = (uint32_t)0U; i < dbLen; i++) - { - uint8_t *os = db; - uint8_t x = db[i] ^ dbMask[i]; - os[i] = x; - } - } - msBits = emBits % (uint32_t)8U; - if (msBits > (uint32_t)0U) - { - db[0U] = db[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits); - } - memcpy(em, db, dbLen * sizeof (uint8_t)); - memcpy(em + dbLen, m1Hash, hLen * sizeof (uint8_t)); - em[emLen - (uint32_t)1U] = (uint8_t)0xbcU; - } - } - } - } - } - } - } - } -} - -static inline bool -pss_verify( - Spec_Hash_Definitions_hash_alg a, - uint32_t saltLen, - uint32_t msgLen, - uint8_t 
*msg, - uint32_t emBits, - uint8_t *em -) -{ - uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t msBits = emBits % (uint32_t)8U; - uint8_t em_0; - if (msBits > (uint32_t)0U) - { - em_0 = em[0U] & (uint8_t)0xffU << msBits; - } - else - { - em_0 = (uint8_t)0U; - } - { - uint8_t em_last = em[emLen - (uint32_t)1U]; - if (emLen < saltLen + hash_len(a) + (uint32_t)2U) - { - return false; - } - if (!(em_last == (uint8_t)0xbcU && em_0 == (uint8_t)0U)) - { - return false; - } - { - uint32_t emLen1 = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t hLen = hash_len(a); - KRML_CHECK_SIZE(sizeof (uint8_t), hLen); - { - uint8_t m1Hash0[hLen]; - memset(m1Hash0, 0U, hLen * sizeof (uint8_t)); - { - uint32_t dbLen = emLen1 - hLen - (uint32_t)1U; - uint8_t *maskedDB = em; - uint8_t *m1Hash = em + dbLen; - KRML_CHECK_SIZE(sizeof (uint8_t), dbLen); - { - uint8_t dbMask[dbLen]; - memset(dbMask, 0U, dbLen * sizeof (uint8_t)); - mgf_hash(a, hLen, m1Hash, dbLen, dbMask); - { - uint32_t i; - for (i = (uint32_t)0U; i < dbLen; i++) - { - uint8_t *os = dbMask; - uint8_t x = dbMask[i] ^ maskedDB[i]; - os[i] = x; - } - } - { - uint32_t msBits1 = emBits % (uint32_t)8U; - if (msBits1 > (uint32_t)0U) - { - dbMask[0U] = dbMask[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits1); - } - { - uint32_t padLen = emLen1 - saltLen - hLen - (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint8_t), padLen); - { - uint8_t pad2[padLen]; - memset(pad2, 0U, padLen * sizeof (uint8_t)); - pad2[padLen - (uint32_t)1U] = (uint8_t)0x01U; - { - uint8_t *pad = dbMask; - uint8_t *salt = dbMask + padLen; - uint8_t res = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < padLen; i++) - { - uint8_t uu____0 = FStar_UInt8_eq_mask(pad[i], pad2[i]); - res = uu____0 & res; - } - } - { - uint8_t z = res; - if (!(z == (uint8_t)255U)) - { - return false; - } - { - uint32_t m1Len = (uint32_t)8U + hLen + saltLen; - KRML_CHECK_SIZE(sizeof (uint8_t), m1Len); - { - uint8_t m1[m1Len]; - memset(m1, 0U, m1Len * sizeof (uint8_t)); - hash(a, m1 + (uint32_t)8U, msgLen, msg); - memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t)); - hash(a, m1Hash0, m1Len, m1); - { - uint8_t res0 = (uint8_t)255U; - { - uint32_t i; - for (i = (uint32_t)0U; i < hLen; i++) - { - uint8_t uu____1 = FStar_UInt8_eq_mask(m1Hash0[i], m1Hash[i]); - res0 = uu____1 & res0; - } - } - { - uint8_t z0 = res0; - return z0 == (uint8_t)255U; - } - } - } - } - } - } - } - } - } - } - } - } - } - } -} - -static inline bool -load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb, uint64_t *pkey) -{ - uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint64_t *n = pkey; - uint64_t *r2 = pkey + nLen; - uint64_t *e = pkey + nLen + nLen; - uint64_t m0; - uint64_t m1; - uint64_t m; - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U) - / (uint32_t)64U - + (uint32_t)1U, - modBits - (uint32_t)1U, - n, - r2); - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e); - m0 = check_modulus_u64(modBits, n); - m1 = check_exponent_u64(eBits, e); - m = m0 & m1; - return m == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -static inline bool -load_skey( - uint32_t modBits, - uint32_t eBits, - uint32_t dBits, - uint8_t *nb, - uint8_t *eb, - uint8_t *db, - uint64_t *skey -) -{ - uint32_t dbLen = (dBits - 
(uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t pkeyLen = nLen + nLen + eLen; - uint64_t *pkey = skey; - uint64_t *d = skey + pkeyLen; - bool b = load_pkey(modBits, eBits, nb, eb, pkey); - uint64_t m1; - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d); - m1 = check_exponent_u64(dBits, d); - return b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU; -} - -bool -Hacl_RSAPSS_rsapss_sign( - Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint32_t dBits, - uint64_t *skey, - uint32_t saltLen, - uint8_t *salt, - uint32_t msgLen, - uint8_t *msg, - uint8_t *sgnt -) -{ - uint32_t hLen = hash_len(a); - bool - b = - saltLen - <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U - && - saltLen - + hLen - + (uint32_t)2U - <= (modBits - (uint32_t)1U - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - if (b) - { - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t m[nLen]; - memset(m, 0U, nLen * sizeof (uint64_t)); - { - uint32_t emBits = modBits - (uint32_t)1U; - uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint8_t), emLen); - { - uint8_t em[emLen]; - memset(em, 0U, emLen * sizeof (uint8_t)); - pss_encode(a, saltLen, salt, msgLen, msg, emBits, em); - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(emLen, em, m); - { - uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen1); - { - uint64_t s[nLen1]; - memset(s, 0U, nLen1 * sizeof (uint64_t)); - KRML_CHECK_SIZE(sizeof (uint64_t), nLen1); - { - uint64_t m_[nLen1]; - memset(m_, 0U, nLen1 * sizeof (uint64_t)); - { - uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint64_t *n = skey; - uint64_t *r2 = skey + nLen2; - uint64_t *e = skey + nLen2 + nLen2; - uint64_t *d = skey + nLen2 + nLen2 + eLen; - uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - - (uint32_t)1U) - / (uint32_t)64U - + (uint32_t)1U, - n, - mu, - r2, - m, - dBits, - d, - s); - { - uint64_t mu0 = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - - (uint32_t)1U) - / (uint32_t)64U - + (uint32_t)1U, - n, - mu0, - r2, - s, - eBits, - e, - m_); - { - uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU; - { - uint32_t i; - for (i = (uint32_t)0U; i < nLen2; i++) - { - uint64_t uu____0 = FStar_UInt64_eq_mask(m[i], m_[i]); - mask = uu____0 & mask; - } - } - { - uint64_t mask1 = mask; - uint64_t eq_m = mask1; - { - uint32_t i; - for (i = (uint32_t)0U; i < nLen2; i++) - { - uint64_t *os = s; - uint64_t x = s[i]; - uint64_t x0 = eq_m & x; - os[i] = x0; - } - } - { - bool eq_b = eq_m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - Hacl_Bignum_Convert_bn_to_bytes_be_uint64(k, s, sgnt); - { - bool eq_b0 = eq_b; - return eq_b0; - } - } - } - } - } - } - } - } - } - } - } - } - } - return false; -} - -bool -Hacl_RSAPSS_rsapss_verify( - Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint64_t *pkey, - uint32_t saltLen, - uint32_t sgntLen, - uint8_t *sgnt, - uint32_t msgLen, - uint8_t *msg -) -{ - uint32_t hLen = hash_len(a); - bool - 
b = - saltLen - <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U - && sgntLen == (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - if (b) - { - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen); - { - uint64_t m[nLen]; - memset(m, 0U, nLen * sizeof (uint64_t)); - { - uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint64_t), nLen1); - { - uint64_t s[nLen1]; - memset(s, 0U, nLen1 * sizeof (uint64_t)); - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(k, sgnt, s); - { - uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint64_t *n = pkey; - uint64_t *r2 = pkey + nLen2; - uint64_t *e = pkey + nLen2 + nLen2; - uint64_t acc = (uint64_t)0U; - { - uint32_t i; - for (i = (uint32_t)0U; i < nLen2; i++) - { - uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]); - uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]); - acc = - (beq & acc) - | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))); - } - } - { - uint64_t mask = acc; - bool res; - if (mask == (uint64_t)0xFFFFFFFFFFFFFFFFU) - { - uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]); - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U) - / (uint32_t)64U - + (uint32_t)1U, - n, - mu, - r2, - s, - eBits, - e, - m); - { - bool ite; - if (!((modBits - (uint32_t)1U) % (uint32_t)8U == (uint32_t)0U)) - { - ite = true; - } - else - { - uint32_t i = (modBits - (uint32_t)1U) / (uint32_t)64U; - uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U; - uint64_t tmp = m[i]; - uint64_t get_bit = tmp >> j & (uint64_t)1U; - ite = get_bit == (uint64_t)0U; - } - if (ite) - { - res = true; - } - else - { - res = false; - } - } - } - else - { - res = false; - } - { - bool b1 = res; - bool b10 = b1; - if (b10) - { - uint32_t emBits = modBits - (uint32_t)1U; - uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - KRML_CHECK_SIZE(sizeof (uint8_t), emLen); - { - uint8_t em[emLen]; - memset(em, 0U, emLen * sizeof (uint8_t)); - { - uint64_t *m1 = m; - Hacl_Bignum_Convert_bn_to_bytes_be_uint64(emLen, m1, em); - { - bool res0 = pss_verify(a, saltLen, msgLen, msg, emBits, em); - return res0; - } - } - } - } - return false; - } - } - } - } - } - } - } - return false; -} - -uint64_t -*Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb) -{ - bool ite; - if ((uint32_t)1U < modBits && (uint32_t)0U < eBits) - { - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - ite = - nLen - <= (uint32_t)33554431U - && eLen <= (uint32_t)67108863U - && nLen + nLen <= (uint32_t)0xffffffffU - eLen; - } - else - { - ite = false; - } - if (!ite) - { - return NULL; - } - { - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t pkeyLen = nLen + nLen + eLen; - KRML_CHECK_SIZE(sizeof (uint64_t), pkeyLen); - { - uint64_t *pkey = (uint64_t *)KRML_HOST_CALLOC(pkeyLen, sizeof (uint64_t)); - if (pkey == NULL) - { - return pkey; - } - { - uint64_t *pkey1 = pkey; - uint64_t *pkey2 = pkey1; - uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t nLen1 = (modBits - (uint32_t)1U) / 
(uint32_t)64U + (uint32_t)1U; - uint64_t *n = pkey2; - uint64_t *r2 = pkey2 + nLen1; - uint64_t *e = pkey2 + nLen1 + nLen1; - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U) - / (uint32_t)64U - + (uint32_t)1U, - modBits - (uint32_t)1U, - n, - r2); - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e); - { - uint64_t m0 = check_modulus_u64(modBits, n); - uint64_t m1 = check_exponent_u64(eBits, e); - uint64_t m = m0 & m1; - bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - if (b) - { - return pkey2; - } - KRML_HOST_FREE(pkey2); - return NULL; - } - } - } - } -} - -uint64_t -*Hacl_RSAPSS_new_rsapss_load_skey( - uint32_t modBits, - uint32_t eBits, - uint32_t dBits, - uint8_t *nb, - uint8_t *eb, - uint8_t *db -) -{ - bool ite0; - if ((uint32_t)1U < modBits && (uint32_t)0U < eBits) - { - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - ite0 = - nLen - <= (uint32_t)33554431U - && eLen <= (uint32_t)67108863U - && nLen + nLen <= (uint32_t)0xffffffffU - eLen; - } - else - { - ite0 = false; - } - { - bool ite; - if (ite0 && (uint32_t)0U < dBits) - { - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - ite = - dLen - <= (uint32_t)67108863U - && (uint32_t)2U * nLen <= (uint32_t)0xffffffffU - eLen - dLen; - } - else - { - ite = false; - } - if (!ite) - { - return NULL; - } - { - uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t skeyLen = nLen + nLen + eLen + dLen; - KRML_CHECK_SIZE(sizeof (uint64_t), skeyLen); - { - uint64_t *skey = (uint64_t *)KRML_HOST_CALLOC(skeyLen, sizeof (uint64_t)); - if (skey == NULL) - { - return skey; - } - { - uint64_t *skey1 = skey; - uint64_t *skey2 = skey1; - uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t eLen1 = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint32_t pkeyLen = nLen1 + nLen1 + eLen1; - uint64_t *pkey = skey2; - uint64_t *d = skey2 + pkeyLen; - uint32_t nbLen1 = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t ebLen1 = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; - uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U; - uint64_t *n = pkey; - uint64_t *r2 = pkey + nLen2; - uint64_t *e = pkey + nLen2 + nLen2; - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen1, nb, n); - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U) - / (uint32_t)64U - + (uint32_t)1U, - modBits - (uint32_t)1U, - n, - r2); - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen1, eb, e); - { - uint64_t m0 = check_modulus_u64(modBits, n); - uint64_t m10 = check_exponent_u64(eBits, e); - uint64_t m = m0 & m10; - bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU; - Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d); - { - uint64_t m1 = check_exponent_u64(dBits, d); - bool b0 = b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU; - if (b0) - { - return skey2; - } - KRML_HOST_FREE(skey2); - return NULL; - } - } - } - } - } - } -} - -bool -Hacl_RSAPSS_rsapss_skey_sign( - 
Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint32_t dBits, - uint8_t *nb, - uint8_t *eb, - uint8_t *db, - uint32_t saltLen, - uint8_t *salt, - uint32_t msgLen, - uint8_t *msg, - uint8_t *sgnt -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), - (uint32_t)2U - * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U - + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U); - { - uint64_t - skey[(uint32_t)2U - * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U - + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U]; - memset(skey, - 0U, - ((uint32_t)2U - * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U - + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - * sizeof (uint64_t)); - { - bool b = load_skey(modBits, eBits, dBits, nb, eb, db, skey); - if (b) - { - return - Hacl_RSAPSS_rsapss_sign(a, - modBits, - eBits, - dBits, - skey, - saltLen, - salt, - msgLen, - msg, - sgnt); - } - return false; - } - } -} - -bool -Hacl_RSAPSS_rsapss_pkey_verify( - Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint8_t *nb, - uint8_t *eb, - uint32_t saltLen, - uint32_t sgntLen, - uint8_t *sgnt, - uint32_t msgLen, - uint8_t *msg -) -{ - KRML_CHECK_SIZE(sizeof (uint64_t), - (uint32_t)2U - * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U); - { - uint64_t - pkey[(uint32_t)2U - * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U]; - memset(pkey, - 0U, - ((uint32_t)2U - * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U) - * sizeof (uint64_t)); - { - bool b = load_pkey(modBits, eBits, nb, eb, pkey); - if (b) - { - return - Hacl_RSAPSS_rsapss_verify(a, - modBits, - eBits, - pkey, - saltLen, - sgntLen, - sgnt, - msgLen, - msg); - } - return false; - } - } -} - diff --git a/dist/c89-compatible/Hacl_RSAPSS.h b/dist/c89-compatible/Hacl_RSAPSS.h deleted file mode 100644 index 18bb9de27b..0000000000 --- a/dist/c89-compatible/Hacl_RSAPSS.h +++ /dev/null @@ -1,117 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_RSAPSS_H -#define __Hacl_RSAPSS_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Spec.h" -#include "Hacl_Hash_SHA2.h" -#include "Hacl_Bignum_Base.h" -#include "evercrypt_targetconfig.h" -bool -Hacl_RSAPSS_rsapss_sign( - Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint32_t dBits, - uint64_t *skey, - uint32_t saltLen, - uint8_t *salt, - uint32_t msgLen, - uint8_t *msg, - uint8_t *sgnt -); - -bool -Hacl_RSAPSS_rsapss_verify( - Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint64_t *pkey, - uint32_t saltLen, - uint32_t sgntLen, - uint8_t *sgnt, - uint32_t msgLen, - uint8_t *msg -); - -uint64_t -*Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb); - -uint64_t -*Hacl_RSAPSS_new_rsapss_load_skey( - uint32_t modBits, - uint32_t eBits, - uint32_t dBits, - uint8_t *nb, - uint8_t *eb, - uint8_t *db -); - -bool -Hacl_RSAPSS_rsapss_skey_sign( - Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint32_t dBits, - uint8_t *nb, - uint8_t *eb, - uint8_t *db, - uint32_t saltLen, - uint8_t *salt, - uint32_t msgLen, - uint8_t *msg, - uint8_t *sgnt -); - -bool -Hacl_RSAPSS_rsapss_pkey_verify( - Spec_Hash_Definitions_hash_alg a, - uint32_t modBits, - uint32_t eBits, - uint8_t *nb, - uint8_t *eb, - uint32_t saltLen, - uint32_t sgntLen, - uint8_t *sgnt, - uint32_t msgLen, - uint8_t *msg -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_RSAPSS_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_SHA2_Generic.h b/dist/c89-compatible/Hacl_SHA2_Generic.h deleted file mode 100644 index 22f09235ba..0000000000 --- a/dist/c89-compatible/Hacl_SHA2_Generic.h +++ /dev/null @@ -1,134 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_SHA2_Generic_H -#define __Hacl_SHA2_Generic_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -static const -uint32_t -Hacl_Impl_SHA2_Generic_h224[8U] = - { - (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U, - (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U - }; - -static const -uint32_t -Hacl_Impl_SHA2_Generic_h256[8U] = - { - (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU, - (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U - }; - -static const -uint64_t -Hacl_Impl_SHA2_Generic_h384[8U] = - { - (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U, - (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U, - (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U - }; - -static const -uint64_t -Hacl_Impl_SHA2_Generic_h512[8U] = - { - (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU, - (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU, - (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U - }; - -static const -uint32_t -Hacl_Impl_SHA2_Generic_k224_256[64U] = - { - (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U, - (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U, - (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U, - (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U, - (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU, - (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU, - (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U, - (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U, - (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U, - (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U, - (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U, - (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U, - (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U, - (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U, - (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U, - (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U - }; - -static const -uint64_t -Hacl_Impl_SHA2_Generic_k384_512[80U] = - { - (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU, - (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U, - (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U, - (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U, - (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, 
(uint64_t)0x9bdc06a725c71235U, - (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U, - (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U, - (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U, - (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU, - (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U, - (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU, - (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU, - (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U, - (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U, - (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U, - (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U, - (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U, - (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU, - (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU, - (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU, - (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U, - (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U, - (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU, - (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU, - (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU, - (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU, - (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U - }; - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_SHA2_Generic_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_SHA2_Scalar32.c b/dist/c89-compatible/Hacl_SHA2_Scalar32.c deleted file mode 100644 index 0892f92416..0000000000 --- a/dist/c89-compatible/Hacl_SHA2_Scalar32.c +++ /dev/null @@ -1,984 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_SHA2_Scalar32.h" - -#include "internal/Hacl_SHA2_Types.h" - -static inline void sha224_update1(uint8_t *block, uint32_t *hash) -{ - uint32_t hash_old[8U] = { 0U }; - uint32_t ws[16U] = { 0U }; - uint8_t *b; - uint32_t u0; - uint32_t u1; - uint32_t u2; - uint32_t u3; - uint32_t u4; - uint32_t u5; - uint32_t u6; - uint32_t u7; - uint32_t u8; - uint32_t u9; - uint32_t u10; - uint32_t u11; - uint32_t u12; - uint32_t u13; - uint32_t u14; - uint32_t u; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t)); - b = block; - u0 = load32_be(b); - ws[0U] = u0; - u1 = load32_be(b + (uint32_t)4U); - ws[1U] = u1; - u2 = load32_be(b + (uint32_t)8U); - ws[2U] = u2; - u3 = load32_be(b + (uint32_t)12U); - ws[3U] = u3; - u4 = load32_be(b + (uint32_t)16U); - ws[4U] = u4; - u5 = load32_be(b + (uint32_t)20U); - ws[5U] = u5; - u6 = load32_be(b + (uint32_t)24U); - ws[6U] = u6; - u7 = load32_be(b + (uint32_t)28U); - ws[7U] = u7; - u8 = load32_be(b + (uint32_t)32U); - ws[8U] = u8; - u9 = load32_be(b + (uint32_t)36U); - ws[9U] = u9; - u10 = load32_be(b + (uint32_t)40U); - ws[10U] = u10; - u11 = load32_be(b + (uint32_t)44U); - ws[11U] = u11; - u12 = load32_be(b + (uint32_t)48U); - ws[12U] = u12; - u13 = load32_be(b + (uint32_t)52U); - ws[13U] = u13; - u14 = load32_be(b + (uint32_t)56U); - ws[14U] = u14; - u = load32_be(b + (uint32_t)60U); - ws[15U] = u; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i]; - uint32_t ws_t = ws[i]; - uint32_t a0 = hash[0U]; - uint32_t b0 = hash[1U]; - uint32_t c0 = hash[2U]; - uint32_t d0 = hash[3U]; - uint32_t e0 = hash[4U]; - uint32_t f0 = hash[5U]; - uint32_t g0 = hash[6U]; - uint32_t h02 = hash[7U]; - uint32_t k_e_t = k_t; - uint32_t - t1 = - h02 - + - ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U) - ^ - ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U) - ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k_e_t - + ws_t; - uint32_t - t2 = - ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U) - ^ - ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U) - ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - uint32_t a1 = t1 + t2; - uint32_t b1 = a0; - uint32_t c1 = b0; - uint32_t d1 = c0; - uint32_t e1 = d0 + t1; - uint32_t f1 = e0; - uint32_t g1 = f0; - uint32_t h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)3U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t t16 = ws[i]; - uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - uint32_t - s1 = - (t2 << (uint32_t)15U | t2 >> (uint32_t)17U) - ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U); - uint32_t - s0 = - (t15 << (uint32_t)25U | t15 >> (uint32_t)7U) - ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U); - ws[i] = s1 + t7 + s0 + t16;); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t 
*os = hash; - uint32_t x = hash[i] + hash_old[i]; - os[i] = x;); -} - -void Hacl_SHA2_Scalar32_sha224(uint8_t *dst, uint32_t input_len, uint8_t *input) -{ - uint8_t *ib = input; - uint8_t *rb = dst; - uint32_t st[8U] = { 0U }; - uint32_t rem; - uint64_t len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b00; - uint8_t *lb; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = st; - uint32_t x = Hacl_Impl_SHA2_Generic_h224[i]; - os[i] = x;); - rem = input_len % (uint32_t)64U; - len_ = (uint64_t)input_len; - blocks0 = input_len / (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b0 = ib; - uint8_t *mb = b0 + i * (uint32_t)64U; - sha224_update1(mb, st); - } - } - rem1 = input_len % (uint32_t)64U; - b00 = ib; - lb = b00 + input_len - rem1; - { - uint32_t blocks; - if (rem + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)64U; - uint8_t last[128U] = { 0U }; - uint8_t totlen_buf[8U] = { 0U }; - uint64_t total_len_bits = len_ << (uint32_t)3U; - uint8_t *b0; - uint8_t *last00; - uint8_t *last10; - store64_be(totlen_buf, total_len_bits); - b0 = lb; - memcpy(last, b0, rem * sizeof (uint8_t)); - last[rem] = (uint8_t)0x80U; - memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last00 = last; - last10 = last + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit0; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l0; - uint8_t *l1; - uint8_t *lb0; - uint8_t *lb1; - lit0.fst = last00; - lit0.snd = last10; - scrut0 = lit0; - l0 = scrut0.fst; - l1 = scrut0.snd; - lb0 = l0; - lb1 = l1; - { - Hacl_Impl_SHA2_Types_uint8_2p lit; - Hacl_Impl_SHA2_Types_uint8_2p scrut; - uint8_t *last0; - uint8_t *last1; - lit.fst = lb0; - lit.snd = lb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha224_update1(last0, st); - if (blocks > (uint32_t)1U) - { - sha224_update1(last1, st); - } - { - uint8_t hbuf[32U] = { 0U }; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - store32_be(hbuf + i * (uint32_t)4U, st[i]);); - memcpy(rb, hbuf, (uint32_t)28U * sizeof (uint8_t)); - } - } - } - } - } -} - -static inline void sha256_update1(uint8_t *block, uint32_t *hash) -{ - uint32_t hash_old[8U] = { 0U }; - uint32_t ws[16U] = { 0U }; - uint8_t *b; - uint32_t u0; - uint32_t u1; - uint32_t u2; - uint32_t u3; - uint32_t u4; - uint32_t u5; - uint32_t u6; - uint32_t u7; - uint32_t u8; - uint32_t u9; - uint32_t u10; - uint32_t u11; - uint32_t u12; - uint32_t u13; - uint32_t u14; - uint32_t u; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t)); - b = block; - u0 = load32_be(b); - ws[0U] = u0; - u1 = load32_be(b + (uint32_t)4U); - ws[1U] = u1; - u2 = load32_be(b + (uint32_t)8U); - ws[2U] = u2; - u3 = load32_be(b + (uint32_t)12U); - ws[3U] = u3; - u4 = load32_be(b + (uint32_t)16U); - ws[4U] = u4; - u5 = load32_be(b + (uint32_t)20U); - ws[5U] = u5; - u6 = load32_be(b + (uint32_t)24U); - ws[6U] = u6; - u7 = load32_be(b + (uint32_t)28U); - ws[7U] = u7; - u8 = load32_be(b + (uint32_t)32U); - ws[8U] = u8; - u9 = load32_be(b + (uint32_t)36U); - ws[9U] = u9; - u10 = load32_be(b + (uint32_t)40U); - ws[10U] = u10; - u11 = load32_be(b + (uint32_t)44U); - ws[11U] = u11; - u12 = load32_be(b + (uint32_t)48U); - ws[12U] = u12; - u13 = load32_be(b + (uint32_t)52U); - ws[13U] = u13; - u14 = load32_be(b + (uint32_t)56U); - ws[14U] = u14; - u = load32_be(b + (uint32_t)60U); - ws[15U] = u; - 
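/* Editorial note, not part of the deleted file: with the 16 message words
 * loaded big-endian into ws[], the unrolled loop below runs the 64 SHA-256
 * rounds as 4 groups of 16, recomputing the message schedule in place after
 * each of the first 3 groups. In FIPS 180-4 notation every round computes
 *
 *   T1 = h + Sigma1(e) + Ch(e, f, g) + K[t] + W[t]
 *   T2 = Sigma0(a) + Maj(a, b, c)
 *
 * and shifts the working variables a..h; the rotations are emitted as
 * (x << n | x >> (32 - n)) pairs, and the closing KRML_MAYBE_FOR8 adds the
 * saved hash_old[] back in (the Davies-Meyer feed-forward).
 */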
KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i]; - uint32_t ws_t = ws[i]; - uint32_t a0 = hash[0U]; - uint32_t b0 = hash[1U]; - uint32_t c0 = hash[2U]; - uint32_t d0 = hash[3U]; - uint32_t e0 = hash[4U]; - uint32_t f0 = hash[5U]; - uint32_t g0 = hash[6U]; - uint32_t h02 = hash[7U]; - uint32_t k_e_t = k_t; - uint32_t - t1 = - h02 - + - ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U) - ^ - ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U) - ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k_e_t - + ws_t; - uint32_t - t2 = - ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U) - ^ - ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U) - ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - uint32_t a1 = t1 + t2; - uint32_t b1 = a0; - uint32_t c1 = b0; - uint32_t d1 = c0; - uint32_t e1 = d0 + t1; - uint32_t f1 = e0; - uint32_t g1 = f0; - uint32_t h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)3U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t t16 = ws[i]; - uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - uint32_t - s1 = - (t2 << (uint32_t)15U | t2 >> (uint32_t)17U) - ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U); - uint32_t - s0 = - (t15 << (uint32_t)25U | t15 >> (uint32_t)7U) - ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U); - ws[i] = s1 + t7 + s0 + t16;); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = hash; - uint32_t x = hash[i] + hash_old[i]; - os[i] = x;); -} - -void Hacl_SHA2_Scalar32_sha256(uint8_t *dst, uint32_t input_len, uint8_t *input) -{ - uint8_t *ib = input; - uint8_t *rb = dst; - uint32_t st[8U] = { 0U }; - uint32_t rem; - uint64_t len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b00; - uint8_t *lb; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = st; - uint32_t x = Hacl_Impl_SHA2_Generic_h256[i]; - os[i] = x;); - rem = input_len % (uint32_t)64U; - len_ = (uint64_t)input_len; - blocks0 = input_len / (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b0 = ib; - uint8_t *mb = b0 + i * (uint32_t)64U; - sha256_update1(mb, st); - } - } - rem1 = input_len % (uint32_t)64U; - b00 = ib; - lb = b00 + input_len - rem1; - { - uint32_t blocks; - if (rem + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)64U; - uint8_t last[128U] = { 0U }; - uint8_t totlen_buf[8U] = { 0U }; - uint64_t total_len_bits = len_ << (uint32_t)3U; - uint8_t *b0; - uint8_t *last00; - uint8_t *last10; - store64_be(totlen_buf, total_len_bits); - b0 = lb; - memcpy(last, b0, rem * sizeof (uint8_t)); - last[rem] = (uint8_t)0x80U; - memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last00 = last; - last10 = last + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit0; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l0; - uint8_t *l1; - uint8_t *lb0; - uint8_t *lb1; - lit0.fst = last00; - lit0.snd = last10; - scrut0 = lit0; - 
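/* Editorial note, not part of the deleted file: at this point `last` holds
 * the padded tail of the input: the rem leftover bytes, the 0x80 delimiter,
 * and the total bit length stored big-endian in its final 8 bytes. `blocks`
 * was chosen so the padding fits, i.e. one 64-byte block when
 * rem + 9 <= 64 and two otherwise; the pointer pair unpacked below selects
 * which of the one or two 64-byte blocks are fed to sha256_update1.
 */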
l0 = scrut0.fst; - l1 = scrut0.snd; - lb0 = l0; - lb1 = l1; - { - Hacl_Impl_SHA2_Types_uint8_2p lit; - Hacl_Impl_SHA2_Types_uint8_2p scrut; - uint8_t *last0; - uint8_t *last1; - lit.fst = lb0; - lit.snd = lb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha256_update1(last0, st); - if (blocks > (uint32_t)1U) - { - sha256_update1(last1, st); - } - { - uint8_t hbuf[32U] = { 0U }; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - store32_be(hbuf + i * (uint32_t)4U, st[i]);); - memcpy(rb, hbuf, (uint32_t)32U * sizeof (uint8_t)); - } - } - } - } - } -} - -static inline void sha384_update1(uint8_t *block, uint64_t *hash) -{ - uint64_t hash_old[8U] = { 0U }; - uint64_t ws[16U] = { 0U }; - uint8_t *b; - uint64_t u0; - uint64_t u1; - uint64_t u2; - uint64_t u3; - uint64_t u4; - uint64_t u5; - uint64_t u6; - uint64_t u7; - uint64_t u8; - uint64_t u9; - uint64_t u10; - uint64_t u11; - uint64_t u12; - uint64_t u13; - uint64_t u14; - uint64_t u; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t)); - b = block; - u0 = load64_be(b); - ws[0U] = u0; - u1 = load64_be(b + (uint32_t)8U); - ws[1U] = u1; - u2 = load64_be(b + (uint32_t)16U); - ws[2U] = u2; - u3 = load64_be(b + (uint32_t)24U); - ws[3U] = u3; - u4 = load64_be(b + (uint32_t)32U); - ws[4U] = u4; - u5 = load64_be(b + (uint32_t)40U); - ws[5U] = u5; - u6 = load64_be(b + (uint32_t)48U); - ws[6U] = u6; - u7 = load64_be(b + (uint32_t)56U); - ws[7U] = u7; - u8 = load64_be(b + (uint32_t)64U); - ws[8U] = u8; - u9 = load64_be(b + (uint32_t)72U); - ws[9U] = u9; - u10 = load64_be(b + (uint32_t)80U); - ws[10U] = u10; - u11 = load64_be(b + (uint32_t)88U); - ws[11U] = u11; - u12 = load64_be(b + (uint32_t)96U); - ws[12U] = u12; - u13 = load64_be(b + (uint32_t)104U); - ws[13U] = u13; - u14 = load64_be(b + (uint32_t)112U); - ws[14U] = u14; - u = load64_be(b + (uint32_t)120U); - ws[15U] = u; - KRML_MAYBE_FOR5(i0, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i]; - uint64_t ws_t = ws[i]; - uint64_t a0 = hash[0U]; - uint64_t b0 = hash[1U]; - uint64_t c0 = hash[2U]; - uint64_t d0 = hash[3U]; - uint64_t e0 = hash[4U]; - uint64_t f0 = hash[5U]; - uint64_t g0 = hash[6U]; - uint64_t h02 = hash[7U]; - uint64_t k_e_t = k_t; - uint64_t - t1 = - h02 - + - ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U) - ^ - ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U) - ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k_e_t - + ws_t; - uint64_t - t2 = - ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U) - ^ - ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U) - ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - uint64_t a1 = t1 + t2; - uint64_t b1 = a0; - uint64_t c1 = b0; - uint64_t d1 = c0; - uint64_t e1 = d0 + t1; - uint64_t f1 = e0; - uint64_t g1 = f0; - uint64_t h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)4U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t16 = ws[i]; - uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - uint64_t - s1 = - (t2 << (uint32_t)45U | t2 >> (uint32_t)19U) - ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U); - uint64_t - s0 = - 
(t15 << (uint32_t)63U | t15 >> (uint32_t)1U) - ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U); - ws[i] = s1 + t7 + s0 + t16;); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t *os = hash; - uint64_t x = hash[i] + hash_old[i]; - os[i] = x;); -} - -void Hacl_SHA2_Scalar32_sha384(uint8_t *dst, uint32_t input_len, uint8_t *input) -{ - uint8_t *ib = input; - uint8_t *rb = dst; - uint64_t st[8U] = { 0U }; - uint32_t rem; - FStar_UInt128_uint128 len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b00; - uint8_t *lb; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t *os = st; - uint64_t x = Hacl_Impl_SHA2_Generic_h384[i]; - os[i] = x;); - rem = input_len % (uint32_t)128U; - len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len); - blocks0 = input_len / (uint32_t)128U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b0 = ib; - uint8_t *mb = b0 + i * (uint32_t)128U; - sha384_update1(mb, st); - } - } - rem1 = input_len % (uint32_t)128U; - b00 = ib; - lb = b00 + input_len - rem1; - { - uint32_t blocks; - if (rem + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)128U; - uint8_t last[256U] = { 0U }; - uint8_t totlen_buf[16U] = { 0U }; - FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(len_, (uint32_t)3U); - uint8_t *b0; - uint8_t *last00; - uint8_t *last10; - store128_be(totlen_buf, total_len_bits); - b0 = lb; - memcpy(last, b0, rem * sizeof (uint8_t)); - last[rem] = (uint8_t)0x80U; - memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last00 = last; - last10 = last + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit0; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l0; - uint8_t *l1; - uint8_t *lb0; - uint8_t *lb1; - lit0.fst = last00; - lit0.snd = last10; - scrut0 = lit0; - l0 = scrut0.fst; - l1 = scrut0.snd; - lb0 = l0; - lb1 = l1; - { - Hacl_Impl_SHA2_Types_uint8_2p lit; - Hacl_Impl_SHA2_Types_uint8_2p scrut; - uint8_t *last0; - uint8_t *last1; - lit.fst = lb0; - lit.snd = lb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha384_update1(last0, st); - if (blocks > (uint32_t)1U) - { - sha384_update1(last1, st); - } - { - uint8_t hbuf[64U] = { 0U }; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - store64_be(hbuf + i * (uint32_t)8U, st[i]);); - memcpy(rb, hbuf, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - } - } -} - -static inline void sha512_update1(uint8_t *block, uint64_t *hash) -{ - uint64_t hash_old[8U] = { 0U }; - uint64_t ws[16U] = { 0U }; - uint8_t *b; - uint64_t u0; - uint64_t u1; - uint64_t u2; - uint64_t u3; - uint64_t u4; - uint64_t u5; - uint64_t u6; - uint64_t u7; - uint64_t u8; - uint64_t u9; - uint64_t u10; - uint64_t u11; - uint64_t u12; - uint64_t u13; - uint64_t u14; - uint64_t u; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t)); - b = block; - u0 = load64_be(b); - ws[0U] = u0; - u1 = load64_be(b + (uint32_t)8U); - ws[1U] = u1; - u2 = load64_be(b + (uint32_t)16U); - ws[2U] = u2; - u3 = load64_be(b + (uint32_t)24U); - ws[3U] = u3; - u4 = load64_be(b + (uint32_t)32U); - ws[4U] = u4; - u5 = load64_be(b + (uint32_t)40U); - ws[5U] = u5; - u6 = load64_be(b + (uint32_t)48U); - ws[6U] = u6; - u7 = load64_be(b + (uint32_t)56U); - ws[7U] = u7; - u8 = load64_be(b + (uint32_t)64U); - ws[8U] = u8; - u9 = load64_be(b + (uint32_t)72U); - ws[9U] = u9; - u10 = 
load64_be(b + (uint32_t)80U); - ws[10U] = u10; - u11 = load64_be(b + (uint32_t)88U); - ws[11U] = u11; - u12 = load64_be(b + (uint32_t)96U); - ws[12U] = u12; - u13 = load64_be(b + (uint32_t)104U); - ws[13U] = u13; - u14 = load64_be(b + (uint32_t)112U); - ws[14U] = u14; - u = load64_be(b + (uint32_t)120U); - ws[15U] = u; - KRML_MAYBE_FOR5(i0, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i]; - uint64_t ws_t = ws[i]; - uint64_t a0 = hash[0U]; - uint64_t b0 = hash[1U]; - uint64_t c0 = hash[2U]; - uint64_t d0 = hash[3U]; - uint64_t e0 = hash[4U]; - uint64_t f0 = hash[5U]; - uint64_t g0 = hash[6U]; - uint64_t h02 = hash[7U]; - uint64_t k_e_t = k_t; - uint64_t - t1 = - h02 - + - ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U) - ^ - ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U) - ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U))) - + ((e0 & f0) ^ (~e0 & g0)) - + k_e_t - + ws_t; - uint64_t - t2 = - ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U) - ^ - ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U) - ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U))) - + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); - uint64_t a1 = t1 + t2; - uint64_t b1 = a0; - uint64_t c1 = b0; - uint64_t d1 = c0; - uint64_t e1 = d0 + t1; - uint64_t f1 = e0; - uint64_t g1 = f0; - uint64_t h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)4U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t t16 = ws[i]; - uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - uint64_t - s1 = - (t2 << (uint32_t)45U | t2 >> (uint32_t)19U) - ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U); - uint64_t - s0 = - (t15 << (uint32_t)63U | t15 >> (uint32_t)1U) - ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U); - ws[i] = s1 + t7 + s0 + t16;); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t *os = hash; - uint64_t x = hash[i] + hash_old[i]; - os[i] = x;); -} - -void Hacl_SHA2_Scalar32_sha512(uint8_t *dst, uint32_t input_len, uint8_t *input) -{ - uint8_t *ib = input; - uint8_t *rb = dst; - uint64_t st[8U] = { 0U }; - uint32_t rem; - FStar_UInt128_uint128 len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b00; - uint8_t *lb; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint64_t *os = st; - uint64_t x = Hacl_Impl_SHA2_Generic_h512[i]; - os[i] = x;); - rem = input_len % (uint32_t)128U; - len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len); - blocks0 = input_len / (uint32_t)128U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b0 = ib; - uint8_t *mb = b0 + i * (uint32_t)128U; - sha512_update1(mb, st); - } - } - rem1 = input_len % (uint32_t)128U; - b00 = ib; - lb = b00 + input_len - rem1; - { - uint32_t blocks; - if (rem + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)128U; - uint8_t last[256U] = { 0U }; - uint8_t totlen_buf[16U] = { 0U }; - FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(len_, (uint32_t)3U); - uint8_t *b0; - uint8_t *last00; - uint8_t *last10; - store128_be(totlen_buf, total_len_bits); - b0 = 
lb; - memcpy(last, b0, rem * sizeof (uint8_t)); - last[rem] = (uint8_t)0x80U; - memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last00 = last; - last10 = last + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit0; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l0; - uint8_t *l1; - uint8_t *lb0; - uint8_t *lb1; - lit0.fst = last00; - lit0.snd = last10; - scrut0 = lit0; - l0 = scrut0.fst; - l1 = scrut0.snd; - lb0 = l0; - lb1 = l1; - { - Hacl_Impl_SHA2_Types_uint8_2p lit; - Hacl_Impl_SHA2_Types_uint8_2p scrut; - uint8_t *last0; - uint8_t *last1; - lit.fst = lb0; - lit.snd = lb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha512_update1(last0, st); - if (blocks > (uint32_t)1U) - { - sha512_update1(last1, st); - } - { - uint8_t hbuf[64U] = { 0U }; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - store64_be(hbuf + i * (uint32_t)8U, st[i]);); - memcpy(rb, hbuf, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_SHA2_Scalar32.h b/dist/c89-compatible/Hacl_SHA2_Scalar32.h deleted file mode 100644 index 60a520f73f..0000000000 --- a/dist/c89-compatible/Hacl_SHA2_Scalar32.h +++ /dev/null @@ -1,54 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __Hacl_SHA2_Scalar32_H -#define __Hacl_SHA2_Scalar32_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_SHA2_Generic.h" -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void Hacl_SHA2_Scalar32_sha224(uint8_t *dst, uint32_t input_len, uint8_t *input); - -void Hacl_SHA2_Scalar32_sha256(uint8_t *dst, uint32_t input_len, uint8_t *input); - -void Hacl_SHA2_Scalar32_sha384(uint8_t *dst, uint32_t input_len, uint8_t *input); - -void Hacl_SHA2_Scalar32_sha512(uint8_t *dst, uint32_t input_len, uint8_t *input); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_SHA2_Scalar32_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_SHA2_Types.h b/dist/c89-compatible/Hacl_SHA2_Types.h deleted file mode 100644 index b52ed915ae..0000000000 --- a/dist/c89-compatible/Hacl_SHA2_Types.h +++ /dev/null @@ -1,47 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_SHA2_Types_H -#define __Hacl_SHA2_Types_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -typedef uint8_t *Hacl_Impl_SHA2_Types_uint8_1p; - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_SHA2_Types_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_SHA2_Vec128.c b/dist/c89-compatible/Hacl_SHA2_Vec128.c deleted file mode 100644 index 26c10d38bc..0000000000 --- a/dist/c89-compatible/Hacl_SHA2_Vec128.c +++ /dev/null @@ -1,1264 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_SHA2_Vec128.h" - -#include "internal/Hacl_SHA2_Types.h" -#include "libintvector.h" -static inline void -sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p block, Lib_IntVector_Intrinsics_vec128 *hash) -{ - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; - uint8_t *b3; - uint8_t *b2; - uint8_t *b10; - uint8_t *b00; - Lib_IntVector_Intrinsics_vec128 v00; - Lib_IntVector_Intrinsics_vec128 v10; - Lib_IntVector_Intrinsics_vec128 v20; - Lib_IntVector_Intrinsics_vec128 v30; - Lib_IntVector_Intrinsics_vec128 v0_; - Lib_IntVector_Intrinsics_vec128 v1_; - Lib_IntVector_Intrinsics_vec128 v2_; - Lib_IntVector_Intrinsics_vec128 v3_; - Lib_IntVector_Intrinsics_vec128 v0__; - Lib_IntVector_Intrinsics_vec128 v1__; - Lib_IntVector_Intrinsics_vec128 v2__; - Lib_IntVector_Intrinsics_vec128 v3__; - Lib_IntVector_Intrinsics_vec128 v0__0; - Lib_IntVector_Intrinsics_vec128 v2__0; - Lib_IntVector_Intrinsics_vec128 v1__0; - Lib_IntVector_Intrinsics_vec128 v3__0; - Lib_IntVector_Intrinsics_vec128 ws0; - Lib_IntVector_Intrinsics_vec128 ws1; - Lib_IntVector_Intrinsics_vec128 ws2; - Lib_IntVector_Intrinsics_vec128 ws3; - Lib_IntVector_Intrinsics_vec128 v01; - Lib_IntVector_Intrinsics_vec128 v11; - Lib_IntVector_Intrinsics_vec128 v21; - Lib_IntVector_Intrinsics_vec128 v31; - Lib_IntVector_Intrinsics_vec128 v0_0; - Lib_IntVector_Intrinsics_vec128 v1_0; - Lib_IntVector_Intrinsics_vec128 v2_0; - Lib_IntVector_Intrinsics_vec128 v3_0; - Lib_IntVector_Intrinsics_vec128 v0__1; - Lib_IntVector_Intrinsics_vec128 v1__1; - Lib_IntVector_Intrinsics_vec128 v2__1; - Lib_IntVector_Intrinsics_vec128 v3__1; - Lib_IntVector_Intrinsics_vec128 v0__2; - Lib_IntVector_Intrinsics_vec128 v2__2; - Lib_IntVector_Intrinsics_vec128 v1__2; - Lib_IntVector_Intrinsics_vec128 v3__2; - Lib_IntVector_Intrinsics_vec128 ws4; - Lib_IntVector_Intrinsics_vec128 ws5; - Lib_IntVector_Intrinsics_vec128 ws6; - Lib_IntVector_Intrinsics_vec128 ws7; - Lib_IntVector_Intrinsics_vec128 v02; - Lib_IntVector_Intrinsics_vec128 v12; - Lib_IntVector_Intrinsics_vec128 v22; - Lib_IntVector_Intrinsics_vec128 v32; - Lib_IntVector_Intrinsics_vec128 v0_1; - Lib_IntVector_Intrinsics_vec128 v1_1; - Lib_IntVector_Intrinsics_vec128 v2_1; - Lib_IntVector_Intrinsics_vec128 v3_1; - Lib_IntVector_Intrinsics_vec128 v0__3; - Lib_IntVector_Intrinsics_vec128 v1__3; - Lib_IntVector_Intrinsics_vec128 v2__3; - Lib_IntVector_Intrinsics_vec128 v3__3; - Lib_IntVector_Intrinsics_vec128 v0__4; - Lib_IntVector_Intrinsics_vec128 v2__4; - Lib_IntVector_Intrinsics_vec128 v1__4; - Lib_IntVector_Intrinsics_vec128 v3__4; - Lib_IntVector_Intrinsics_vec128 ws8; - Lib_IntVector_Intrinsics_vec128 ws9; - Lib_IntVector_Intrinsics_vec128 ws10; - Lib_IntVector_Intrinsics_vec128 ws11; - Lib_IntVector_Intrinsics_vec128 v0; - Lib_IntVector_Intrinsics_vec128 v1; - Lib_IntVector_Intrinsics_vec128 v2; - Lib_IntVector_Intrinsics_vec128 v3; - Lib_IntVector_Intrinsics_vec128 v0_2; - 
Lib_IntVector_Intrinsics_vec128 v1_2; - Lib_IntVector_Intrinsics_vec128 v2_2; - Lib_IntVector_Intrinsics_vec128 v3_2; - Lib_IntVector_Intrinsics_vec128 v0__5; - Lib_IntVector_Intrinsics_vec128 v1__5; - Lib_IntVector_Intrinsics_vec128 v2__5; - Lib_IntVector_Intrinsics_vec128 v3__5; - Lib_IntVector_Intrinsics_vec128 v0__6; - Lib_IntVector_Intrinsics_vec128 v2__6; - Lib_IntVector_Intrinsics_vec128 v1__6; - Lib_IntVector_Intrinsics_vec128 v3__6; - Lib_IntVector_Intrinsics_vec128 ws12; - Lib_IntVector_Intrinsics_vec128 ws13; - Lib_IntVector_Intrinsics_vec128 ws14; - Lib_IntVector_Intrinsics_vec128 ws15; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128)); - b3 = block.snd.snd.snd; - b2 = block.snd.snd.fst; - b10 = block.snd.fst; - b00 = block.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00); - ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10); - ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U); - ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U); - ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U); - ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U); - ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U); - ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U); - ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U); - ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U); - ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U); - ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U); - ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U); - ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U); - v00 = ws[0U]; - v10 = ws[1U]; - v20 = ws[2U]; - v30 = ws[3U]; - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v00, v10); - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v00, v10); - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v20, v30); - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v20, v30); - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - v0__0 = v0__; - v2__0 = v2__; - v1__0 = v1__; - v3__0 = v3__; - ws0 = v0__0; - ws1 = v1__0; - ws2 = v2__0; - ws3 = v3__0; - v01 = ws[4U]; - v11 = ws[5U]; - v21 = ws[6U]; - v31 = ws[7U]; - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v01, v11); - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v01, v11); - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v21, v31); - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v21, v31); - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - v0__2 = v0__1; - v2__2 = v2__1; - v1__2 = v1__1; - v3__2 = v3__1; - ws4 = v0__2; - ws5 = v1__2; - ws6 = v2__2; - ws7 = v3__2; - v02 = ws[8U]; - v12 = ws[9U]; - v22 = ws[10U]; - v32 = ws[11U]; - v0_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v02, v12); - 
v1_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v02, v12); - v2_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v22, v32); - v3_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v22, v32); - v0__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_1, v2_1); - v1__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_1, v2_1); - v2__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_1, v3_1); - v3__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_1, v3_1); - v0__4 = v0__3; - v2__4 = v2__3; - v1__4 = v1__3; - v3__4 = v3__3; - ws8 = v0__4; - ws9 = v1__4; - ws10 = v2__4; - ws11 = v3__4; - v0 = ws[12U]; - v1 = ws[13U]; - v2 = ws[14U]; - v3 = ws[15U]; - v0_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v0, v1); - v1_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v0, v1); - v2_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v2, v3); - v3_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v2, v3); - v0__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_2, v2_2); - v1__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_2, v2_2); - v2__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_2, v3_2); - v3__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_2, v3_2); - v0__6 = v0__5; - v2__6 = v2__5; - v1__6 = v1__5; - v3__6 = v3__5; - ws12 = v0__6; - ws13 = v1__6; - ws14 = v2__6; - ws15 = v3__6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i]; - Lib_IntVector_Intrinsics_vec128 ws_t = ws[i]; - Lib_IntVector_Intrinsics_vec128 a0 = hash[0U]; - Lib_IntVector_Intrinsics_vec128 b0 = hash[1U]; - Lib_IntVector_Intrinsics_vec128 c0 = hash[2U]; - Lib_IntVector_Intrinsics_vec128 d0 = hash[3U]; - Lib_IntVector_Intrinsics_vec128 e0 = hash[4U]; - Lib_IntVector_Intrinsics_vec128 f0 = hash[5U]; - Lib_IntVector_Intrinsics_vec128 g0 = hash[6U]; - Lib_IntVector_Intrinsics_vec128 h02 = hash[7U]; - Lib_IntVector_Intrinsics_vec128 k_e_t = Lib_IntVector_Intrinsics_vec128_load32(k_t); - Lib_IntVector_Intrinsics_vec128 - t1 = - Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02, - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, - (uint32_t)6U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, - (uint32_t)11U), - Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0), - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))), - k_e_t), - ws_t); - Lib_IntVector_Intrinsics_vec128 - t2 = - Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, - (uint32_t)2U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, - (uint32_t)13U), - Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0), - 
Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0), - Lib_IntVector_Intrinsics_vec128_and(b0, c0)))); - Lib_IntVector_Intrinsics_vec128 a1 = Lib_IntVector_Intrinsics_vec128_add32(t1, t2); - Lib_IntVector_Intrinsics_vec128 b1 = a0; - Lib_IntVector_Intrinsics_vec128 c1 = b0; - Lib_IntVector_Intrinsics_vec128 d1 = c0; - Lib_IntVector_Intrinsics_vec128 e1 = Lib_IntVector_Intrinsics_vec128_add32(d0, t1); - Lib_IntVector_Intrinsics_vec128 f1 = e0; - Lib_IntVector_Intrinsics_vec128 g1 = f0; - Lib_IntVector_Intrinsics_vec128 h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)3U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 t16 = ws[i]; - Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec128 - s1 = - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2, - (uint32_t)17U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2, - (uint32_t)19U), - Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U))); - Lib_IntVector_Intrinsics_vec128 - s0 = - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15, - (uint32_t)7U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15, - (uint32_t)18U), - Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U))); - ws[i] = - Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1, - t7), - s0), - t16);); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 *os = hash; - Lib_IntVector_Intrinsics_vec128 - x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]); - os[i] = x;); -} - -void -Hacl_SHA2_Vec128_sha224_4( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3 -) -{ - Hacl_Impl_SHA2_Types_uint8_4p ib; - ib.fst = input0; - ib.snd.fst = input1; - ib.snd.snd.fst = input2; - ib.snd.snd.snd = input3; - { - Hacl_Impl_SHA2_Types_uint8_4p rb; - rb.fst = dst0; - rb.snd.fst = dst1; - rb.snd.snd.fst = dst2; - rb.snd.snd.snd = dst3; - { - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; - uint32_t rem; - uint64_t len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b30; - uint8_t *b20; - uint8_t *b10; - uint8_t *b00; - uint8_t *bl0; - uint8_t *bl10; - uint8_t *bl20; - uint8_t *bl30; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 *os = st; - uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i]; - Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi); - os[i] = x;); - rem = input_len % (uint32_t)64U; - len_ = (uint64_t)input_len; - blocks0 = input_len / (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b3 = ib.snd.snd.snd; - uint8_t *b2 = ib.snd.snd.fst; - uint8_t *b1 = ib.snd.fst; - uint8_t *b0 = ib.fst; - uint8_t *bl00 = b0 + i * (uint32_t)64U; - uint8_t *bl1 = b1 + i * (uint32_t)64U; - uint8_t *bl2 = b2 + i * 
(uint32_t)64U; - uint8_t *bl3 = b3 + i * (uint32_t)64U; - Hacl_Impl_SHA2_Types_uint8_4p lit; - lit.fst = bl00; - lit.snd.fst = bl1; - lit.snd.snd.fst = bl2; - lit.snd.snd.snd = bl3; - { - Hacl_Impl_SHA2_Types_uint8_4p mb = lit; - sha224_update4(mb, st); - } - } - } - rem1 = input_len % (uint32_t)64U; - b30 = ib.snd.snd.snd; - b20 = ib.snd.snd.fst; - b10 = ib.snd.fst; - b00 = ib.fst; - bl0 = b00 + input_len - rem1; - bl10 = b10 + input_len - rem1; - bl20 = b20 + input_len - rem1; - bl30 = b30 + input_len - rem1; - { - Hacl_Impl_SHA2_Types_uint8_4p lit0; - Hacl_Impl_SHA2_Types_uint8_4p lb; - lit0.fst = bl0; - lit0.snd.fst = bl10; - lit0.snd.snd.fst = bl20; - lit0.snd.snd.snd = bl30; - lb = lit0; - { - uint32_t blocks; - if (rem + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)64U; - uint8_t last[512U] = { 0U }; - uint8_t totlen_buf[8U] = { 0U }; - uint64_t total_len_bits = len_ << (uint32_t)3U; - uint8_t *b31; - uint8_t *b21; - uint8_t *b11; - uint8_t *b01; - uint8_t *last00; - uint8_t *last10; - uint8_t *last2; - uint8_t *last3; - uint8_t *last010; - uint8_t *last110; - store64_be(totlen_buf, total_len_bits); - b31 = lb.snd.snd.snd; - b21 = lb.snd.snd.fst; - b11 = lb.snd.fst; - b01 = lb.fst; - last00 = last; - last10 = last + (uint32_t)128U; - last2 = last + (uint32_t)256U; - last3 = last + (uint32_t)384U; - memcpy(last00, b01, rem * sizeof (uint8_t)); - last00[rem] = (uint8_t)0x80U; - memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last010 = last00; - last110 = last00 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit1; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l00; - uint8_t *l01; - uint8_t *last011; - uint8_t *last111; - lit1.fst = last010; - lit1.snd = last110; - scrut0 = lit1; - l00 = scrut0.fst; - l01 = scrut0.snd; - memcpy(last10, b11, rem * sizeof (uint8_t)); - last10[rem] = (uint8_t)0x80U; - memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last011 = last10; - last111 = last10 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit2; - Hacl_Impl_SHA2_Types_uint8_2p scrut1; - uint8_t *l10; - uint8_t *l11; - uint8_t *last012; - uint8_t *last112; - lit2.fst = last011; - lit2.snd = last111; - scrut1 = lit2; - l10 = scrut1.fst; - l11 = scrut1.snd; - memcpy(last2, b21, rem * sizeof (uint8_t)); - last2[rem] = (uint8_t)0x80U; - memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last012 = last2; - last112 = last2 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit3; - Hacl_Impl_SHA2_Types_uint8_2p scrut2; - uint8_t *l20; - uint8_t *l21; - uint8_t *last01; - uint8_t *last11; - lit3.fst = last012; - lit3.snd = last112; - scrut2 = lit3; - l20 = scrut2.fst; - l21 = scrut2.snd; - memcpy(last3, b31, rem * sizeof (uint8_t)); - last3[rem] = (uint8_t)0x80U; - memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last01 = last3; - last11 = last3 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit4; - Hacl_Impl_SHA2_Types_uint8_2p scrut3; - uint8_t *l30; - uint8_t *l31; - lit4.fst = last01; - lit4.snd = last11; - scrut3 = lit4; - l30 = scrut3.fst; - l31 = scrut3.snd; - { - Hacl_Impl_SHA2_Types_uint8_4p mb0; - mb0.fst = l00; - mb0.snd.fst = l10; - mb0.snd.snd.fst = l20; - mb0.snd.snd.snd = l30; - { - Hacl_Impl_SHA2_Types_uint8_4p mb1; - mb1.fst = l01; - mb1.snd.fst = l11; - mb1.snd.snd.fst = l21; - mb1.snd.snd.snd = l31; - { - 
Hacl_Impl_SHA2_Types_uint8_2x4p lit; - Hacl_Impl_SHA2_Types_uint8_2x4p scrut; - Hacl_Impl_SHA2_Types_uint8_4p last0; - Hacl_Impl_SHA2_Types_uint8_4p last1; - lit.fst = mb0; - lit.snd = mb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha224_update4(last0, st); - if (blocks > (uint32_t)1U) - { - sha224_update4(last1, st); - } - { - uint8_t hbuf[128U] = { 0U }; - Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; - Lib_IntVector_Intrinsics_vec128 v10 = st[1U]; - Lib_IntVector_Intrinsics_vec128 v20 = st[2U]; - Lib_IntVector_Intrinsics_vec128 v30 = st[3U]; - Lib_IntVector_Intrinsics_vec128 - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v00, v10); - Lib_IntVector_Intrinsics_vec128 - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v00, v10); - Lib_IntVector_Intrinsics_vec128 - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v20, v30); - Lib_IntVector_Intrinsics_vec128 - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v20, v30); - Lib_IntVector_Intrinsics_vec128 - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 v0__0 = v0__; - Lib_IntVector_Intrinsics_vec128 v2__0 = v2__; - Lib_IntVector_Intrinsics_vec128 v1__0 = v1__; - Lib_IntVector_Intrinsics_vec128 v3__0 = v3__; - Lib_IntVector_Intrinsics_vec128 st0_ = v0__0; - Lib_IntVector_Intrinsics_vec128 st1_ = v1__0; - Lib_IntVector_Intrinsics_vec128 st2_ = v2__0; - Lib_IntVector_Intrinsics_vec128 st3_ = v3__0; - Lib_IntVector_Intrinsics_vec128 v0 = st[4U]; - Lib_IntVector_Intrinsics_vec128 v1 = st[5U]; - Lib_IntVector_Intrinsics_vec128 v2 = st[6U]; - Lib_IntVector_Intrinsics_vec128 v3 = st[7U]; - Lib_IntVector_Intrinsics_vec128 - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v0, v1); - Lib_IntVector_Intrinsics_vec128 - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v0, v1); - Lib_IntVector_Intrinsics_vec128 - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v2, v3); - Lib_IntVector_Intrinsics_vec128 - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v2, v3); - Lib_IntVector_Intrinsics_vec128 - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 v0__2 = v0__1; - Lib_IntVector_Intrinsics_vec128 v2__2 = v2__1; - Lib_IntVector_Intrinsics_vec128 v1__2 = v1__1; - Lib_IntVector_Intrinsics_vec128 v3__2 = v3__1; - Lib_IntVector_Intrinsics_vec128 st4_ = v0__2; - Lib_IntVector_Intrinsics_vec128 st5_ = v1__2; - Lib_IntVector_Intrinsics_vec128 st6_ = v2__2; - Lib_IntVector_Intrinsics_vec128 st7_ = v3__2; - uint8_t *b3; - uint8_t *b2; - uint8_t *b1; - uint8_t *b0; - st[0U] = st0_; - st[1U] = st4_; - st[2U] = st1_; - st[3U] = st5_; - st[4U] = st2_; - st[5U] = st6_; - st[6U] = st3_; - st[7U] = st7_; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, - st[i]);); - b3 = 
rb.snd.snd.snd; - b2 = rb.snd.snd.fst; - b1 = rb.snd.fst; - b0 = rb.fst; - memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t)); - memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t)); - memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t)); - memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - } - } - } - } - } -} - -static inline void -sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p block, Lib_IntVector_Intrinsics_vec128 *hash) -{ - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U }; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U }; - uint8_t *b3; - uint8_t *b2; - uint8_t *b10; - uint8_t *b00; - Lib_IntVector_Intrinsics_vec128 v00; - Lib_IntVector_Intrinsics_vec128 v10; - Lib_IntVector_Intrinsics_vec128 v20; - Lib_IntVector_Intrinsics_vec128 v30; - Lib_IntVector_Intrinsics_vec128 v0_; - Lib_IntVector_Intrinsics_vec128 v1_; - Lib_IntVector_Intrinsics_vec128 v2_; - Lib_IntVector_Intrinsics_vec128 v3_; - Lib_IntVector_Intrinsics_vec128 v0__; - Lib_IntVector_Intrinsics_vec128 v1__; - Lib_IntVector_Intrinsics_vec128 v2__; - Lib_IntVector_Intrinsics_vec128 v3__; - Lib_IntVector_Intrinsics_vec128 v0__0; - Lib_IntVector_Intrinsics_vec128 v2__0; - Lib_IntVector_Intrinsics_vec128 v1__0; - Lib_IntVector_Intrinsics_vec128 v3__0; - Lib_IntVector_Intrinsics_vec128 ws0; - Lib_IntVector_Intrinsics_vec128 ws1; - Lib_IntVector_Intrinsics_vec128 ws2; - Lib_IntVector_Intrinsics_vec128 ws3; - Lib_IntVector_Intrinsics_vec128 v01; - Lib_IntVector_Intrinsics_vec128 v11; - Lib_IntVector_Intrinsics_vec128 v21; - Lib_IntVector_Intrinsics_vec128 v31; - Lib_IntVector_Intrinsics_vec128 v0_0; - Lib_IntVector_Intrinsics_vec128 v1_0; - Lib_IntVector_Intrinsics_vec128 v2_0; - Lib_IntVector_Intrinsics_vec128 v3_0; - Lib_IntVector_Intrinsics_vec128 v0__1; - Lib_IntVector_Intrinsics_vec128 v1__1; - Lib_IntVector_Intrinsics_vec128 v2__1; - Lib_IntVector_Intrinsics_vec128 v3__1; - Lib_IntVector_Intrinsics_vec128 v0__2; - Lib_IntVector_Intrinsics_vec128 v2__2; - Lib_IntVector_Intrinsics_vec128 v1__2; - Lib_IntVector_Intrinsics_vec128 v3__2; - Lib_IntVector_Intrinsics_vec128 ws4; - Lib_IntVector_Intrinsics_vec128 ws5; - Lib_IntVector_Intrinsics_vec128 ws6; - Lib_IntVector_Intrinsics_vec128 ws7; - Lib_IntVector_Intrinsics_vec128 v02; - Lib_IntVector_Intrinsics_vec128 v12; - Lib_IntVector_Intrinsics_vec128 v22; - Lib_IntVector_Intrinsics_vec128 v32; - Lib_IntVector_Intrinsics_vec128 v0_1; - Lib_IntVector_Intrinsics_vec128 v1_1; - Lib_IntVector_Intrinsics_vec128 v2_1; - Lib_IntVector_Intrinsics_vec128 v3_1; - Lib_IntVector_Intrinsics_vec128 v0__3; - Lib_IntVector_Intrinsics_vec128 v1__3; - Lib_IntVector_Intrinsics_vec128 v2__3; - Lib_IntVector_Intrinsics_vec128 v3__3; - Lib_IntVector_Intrinsics_vec128 v0__4; - Lib_IntVector_Intrinsics_vec128 v2__4; - Lib_IntVector_Intrinsics_vec128 v1__4; - Lib_IntVector_Intrinsics_vec128 v3__4; - Lib_IntVector_Intrinsics_vec128 ws8; - Lib_IntVector_Intrinsics_vec128 ws9; - Lib_IntVector_Intrinsics_vec128 ws10; - Lib_IntVector_Intrinsics_vec128 ws11; - Lib_IntVector_Intrinsics_vec128 v0; - Lib_IntVector_Intrinsics_vec128 v1; - Lib_IntVector_Intrinsics_vec128 v2; - Lib_IntVector_Intrinsics_vec128 v3; - Lib_IntVector_Intrinsics_vec128 v0_2; - Lib_IntVector_Intrinsics_vec128 v1_2; - Lib_IntVector_Intrinsics_vec128 v2_2; - Lib_IntVector_Intrinsics_vec128 v3_2; - Lib_IntVector_Intrinsics_vec128 v0__5; - Lib_IntVector_Intrinsics_vec128 v1__5; - 
Lib_IntVector_Intrinsics_vec128 v2__5; - Lib_IntVector_Intrinsics_vec128 v3__5; - Lib_IntVector_Intrinsics_vec128 v0__6; - Lib_IntVector_Intrinsics_vec128 v2__6; - Lib_IntVector_Intrinsics_vec128 v1__6; - Lib_IntVector_Intrinsics_vec128 v3__6; - Lib_IntVector_Intrinsics_vec128 ws12; - Lib_IntVector_Intrinsics_vec128 ws13; - Lib_IntVector_Intrinsics_vec128 ws14; - Lib_IntVector_Intrinsics_vec128 ws15; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128)); - b3 = block.snd.snd.snd; - b2 = block.snd.snd.fst; - b10 = block.snd.fst; - b00 = block.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00); - ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10); - ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U); - ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U); - ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U); - ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U); - ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U); - ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U); - ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U); - ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U); - ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U); - ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U); - ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U); - ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U); - v00 = ws[0U]; - v10 = ws[1U]; - v20 = ws[2U]; - v30 = ws[3U]; - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v00, v10); - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v00, v10); - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v20, v30); - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v20, v30); - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - v0__0 = v0__; - v2__0 = v2__; - v1__0 = v1__; - v3__0 = v3__; - ws0 = v0__0; - ws1 = v1__0; - ws2 = v2__0; - ws3 = v3__0; - v01 = ws[4U]; - v11 = ws[5U]; - v21 = ws[6U]; - v31 = ws[7U]; - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v01, v11); - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v01, v11); - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v21, v31); - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v21, v31); - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - v0__2 = v0__1; - v2__2 = v2__1; - v1__2 = v1__1; - v3__2 = v3__1; - ws4 = v0__2; - ws5 = v1__2; - ws6 = v2__2; - ws7 = v3__2; - v02 = ws[8U]; - v12 = ws[9U]; - v22 = ws[10U]; - v32 = ws[11U]; - v0_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v02, v12); - v1_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v02, v12); - v2_1 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v22, v32); - v3_1 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v22, 
v32); - v0__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_1, v2_1); - v1__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_1, v2_1); - v2__3 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_1, v3_1); - v3__3 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_1, v3_1); - v0__4 = v0__3; - v2__4 = v2__3; - v1__4 = v1__3; - v3__4 = v3__3; - ws8 = v0__4; - ws9 = v1__4; - ws10 = v2__4; - ws11 = v3__4; - v0 = ws[12U]; - v1 = ws[13U]; - v2 = ws[14U]; - v3 = ws[15U]; - v0_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v0, v1); - v1_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v0, v1); - v2_2 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v2, v3); - v3_2 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v2, v3); - v0__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_2, v2_2); - v1__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_2, v2_2); - v2__5 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_2, v3_2); - v3__5 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_2, v3_2); - v0__6 = v0__5; - v2__6 = v2__5; - v1__6 = v1__5; - v3__6 = v3__5; - ws12 = v0__6; - ws13 = v1__6; - ws14 = v2__6; - ws15 = v3__6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i]; - Lib_IntVector_Intrinsics_vec128 ws_t = ws[i]; - Lib_IntVector_Intrinsics_vec128 a0 = hash[0U]; - Lib_IntVector_Intrinsics_vec128 b0 = hash[1U]; - Lib_IntVector_Intrinsics_vec128 c0 = hash[2U]; - Lib_IntVector_Intrinsics_vec128 d0 = hash[3U]; - Lib_IntVector_Intrinsics_vec128 e0 = hash[4U]; - Lib_IntVector_Intrinsics_vec128 f0 = hash[5U]; - Lib_IntVector_Intrinsics_vec128 g0 = hash[6U]; - Lib_IntVector_Intrinsics_vec128 h02 = hash[7U]; - Lib_IntVector_Intrinsics_vec128 k_e_t = Lib_IntVector_Intrinsics_vec128_load32(k_t); - Lib_IntVector_Intrinsics_vec128 - t1 = - Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02, - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, - (uint32_t)6U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, - (uint32_t)11U), - Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0), - Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))), - k_e_t), - ws_t); - Lib_IntVector_Intrinsics_vec128 - t2 = - Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, - (uint32_t)2U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, - (uint32_t)13U), - Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0), - Lib_IntVector_Intrinsics_vec128_and(b0, c0)))); - Lib_IntVector_Intrinsics_vec128 a1 = Lib_IntVector_Intrinsics_vec128_add32(t1, t2); - 
Lib_IntVector_Intrinsics_vec128 b1 = a0; - Lib_IntVector_Intrinsics_vec128 c1 = b0; - Lib_IntVector_Intrinsics_vec128 d1 = c0; - Lib_IntVector_Intrinsics_vec128 e1 = Lib_IntVector_Intrinsics_vec128_add32(d0, t1); - Lib_IntVector_Intrinsics_vec128 f1 = e0; - Lib_IntVector_Intrinsics_vec128 g1 = f0; - Lib_IntVector_Intrinsics_vec128 h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)3U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 t16 = ws[i]; - Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec128 - s1 = - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2, - (uint32_t)17U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2, - (uint32_t)19U), - Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U))); - Lib_IntVector_Intrinsics_vec128 - s0 = - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15, - (uint32_t)7U), - Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15, - (uint32_t)18U), - Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U))); - ws[i] = - Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1, - t7), - s0), - t16);); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 *os = hash; - Lib_IntVector_Intrinsics_vec128 - x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]); - os[i] = x;); -} - -void -Hacl_SHA2_Vec128_sha256_4( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3 -) -{ - Hacl_Impl_SHA2_Types_uint8_4p ib; - ib.fst = input0; - ib.snd.fst = input1; - ib.snd.snd.fst = input2; - ib.snd.snd.snd = input3; - { - Hacl_Impl_SHA2_Types_uint8_4p rb; - rb.fst = dst0; - rb.snd.fst = dst1; - rb.snd.snd.fst = dst2; - rb.snd.snd.snd = dst3; - { - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U }; - uint32_t rem; - uint64_t len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b30; - uint8_t *b20; - uint8_t *b10; - uint8_t *b00; - uint8_t *bl0; - uint8_t *bl10; - uint8_t *bl20; - uint8_t *bl30; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128 *os = st; - uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i]; - Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi); - os[i] = x;); - rem = input_len % (uint32_t)64U; - len_ = (uint64_t)input_len; - blocks0 = input_len / (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b3 = ib.snd.snd.snd; - uint8_t *b2 = ib.snd.snd.fst; - uint8_t *b1 = ib.snd.fst; - uint8_t *b0 = ib.fst; - uint8_t *bl00 = b0 + i * (uint32_t)64U; - uint8_t *bl1 = b1 + i * (uint32_t)64U; - uint8_t *bl2 = b2 + i * (uint32_t)64U; - uint8_t *bl3 = b3 + i * (uint32_t)64U; - Hacl_Impl_SHA2_Types_uint8_4p lit; - lit.fst = bl00; - lit.snd.fst = bl1; - lit.snd.snd.fst = bl2; - lit.snd.snd.snd = bl3; - { - Hacl_Impl_SHA2_Types_uint8_4p mb = lit; 
- sha256_update4(mb, st); - } - } - } - rem1 = input_len % (uint32_t)64U; - b30 = ib.snd.snd.snd; - b20 = ib.snd.snd.fst; - b10 = ib.snd.fst; - b00 = ib.fst; - bl0 = b00 + input_len - rem1; - bl10 = b10 + input_len - rem1; - bl20 = b20 + input_len - rem1; - bl30 = b30 + input_len - rem1; - { - Hacl_Impl_SHA2_Types_uint8_4p lit0; - Hacl_Impl_SHA2_Types_uint8_4p lb; - lit0.fst = bl0; - lit0.snd.fst = bl10; - lit0.snd.snd.fst = bl20; - lit0.snd.snd.snd = bl30; - lb = lit0; - { - uint32_t blocks; - if (rem + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)64U; - uint8_t last[512U] = { 0U }; - uint8_t totlen_buf[8U] = { 0U }; - uint64_t total_len_bits = len_ << (uint32_t)3U; - uint8_t *b31; - uint8_t *b21; - uint8_t *b11; - uint8_t *b01; - uint8_t *last00; - uint8_t *last10; - uint8_t *last2; - uint8_t *last3; - uint8_t *last010; - uint8_t *last110; - store64_be(totlen_buf, total_len_bits); - b31 = lb.snd.snd.snd; - b21 = lb.snd.snd.fst; - b11 = lb.snd.fst; - b01 = lb.fst; - last00 = last; - last10 = last + (uint32_t)128U; - last2 = last + (uint32_t)256U; - last3 = last + (uint32_t)384U; - memcpy(last00, b01, rem * sizeof (uint8_t)); - last00[rem] = (uint8_t)0x80U; - memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last010 = last00; - last110 = last00 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit1; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l00; - uint8_t *l01; - uint8_t *last011; - uint8_t *last111; - lit1.fst = last010; - lit1.snd = last110; - scrut0 = lit1; - l00 = scrut0.fst; - l01 = scrut0.snd; - memcpy(last10, b11, rem * sizeof (uint8_t)); - last10[rem] = (uint8_t)0x80U; - memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last011 = last10; - last111 = last10 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit2; - Hacl_Impl_SHA2_Types_uint8_2p scrut1; - uint8_t *l10; - uint8_t *l11; - uint8_t *last012; - uint8_t *last112; - lit2.fst = last011; - lit2.snd = last111; - scrut1 = lit2; - l10 = scrut1.fst; - l11 = scrut1.snd; - memcpy(last2, b21, rem * sizeof (uint8_t)); - last2[rem] = (uint8_t)0x80U; - memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last012 = last2; - last112 = last2 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit3; - Hacl_Impl_SHA2_Types_uint8_2p scrut2; - uint8_t *l20; - uint8_t *l21; - uint8_t *last01; - uint8_t *last11; - lit3.fst = last012; - lit3.snd = last112; - scrut2 = lit3; - l20 = scrut2.fst; - l21 = scrut2.snd; - memcpy(last3, b31, rem * sizeof (uint8_t)); - last3[rem] = (uint8_t)0x80U; - memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last01 = last3; - last11 = last3 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit4; - Hacl_Impl_SHA2_Types_uint8_2p scrut3; - uint8_t *l30; - uint8_t *l31; - lit4.fst = last01; - lit4.snd = last11; - scrut3 = lit4; - l30 = scrut3.fst; - l31 = scrut3.snd; - { - Hacl_Impl_SHA2_Types_uint8_4p mb0; - mb0.fst = l00; - mb0.snd.fst = l10; - mb0.snd.snd.fst = l20; - mb0.snd.snd.snd = l30; - { - Hacl_Impl_SHA2_Types_uint8_4p mb1; - mb1.fst = l01; - mb1.snd.fst = l11; - mb1.snd.snd.fst = l21; - mb1.snd.snd.snd = l31; - { - Hacl_Impl_SHA2_Types_uint8_2x4p lit; - Hacl_Impl_SHA2_Types_uint8_2x4p scrut; - Hacl_Impl_SHA2_Types_uint8_4p last0; - Hacl_Impl_SHA2_Types_uint8_4p last1; - lit.fst = mb0; - lit.snd = mb1; - scrut = lit; - last0 = scrut.fst; - last1 = 
scrut.snd; - sha256_update4(last0, st); - if (blocks > (uint32_t)1U) - { - sha256_update4(last1, st); - } - { - uint8_t hbuf[128U] = { 0U }; - Lib_IntVector_Intrinsics_vec128 v00 = st[0U]; - Lib_IntVector_Intrinsics_vec128 v10 = st[1U]; - Lib_IntVector_Intrinsics_vec128 v20 = st[2U]; - Lib_IntVector_Intrinsics_vec128 v30 = st[3U]; - Lib_IntVector_Intrinsics_vec128 - v0_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v00, v10); - Lib_IntVector_Intrinsics_vec128 - v1_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v00, v10); - Lib_IntVector_Intrinsics_vec128 - v2_ = Lib_IntVector_Intrinsics_vec128_interleave_low32(v20, v30); - Lib_IntVector_Intrinsics_vec128 - v3_ = Lib_IntVector_Intrinsics_vec128_interleave_high32(v20, v30); - Lib_IntVector_Intrinsics_vec128 - v0__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v1__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_, v2_); - Lib_IntVector_Intrinsics_vec128 - v2__ = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 - v3__ = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_, v3_); - Lib_IntVector_Intrinsics_vec128 v0__0 = v0__; - Lib_IntVector_Intrinsics_vec128 v2__0 = v2__; - Lib_IntVector_Intrinsics_vec128 v1__0 = v1__; - Lib_IntVector_Intrinsics_vec128 v3__0 = v3__; - Lib_IntVector_Intrinsics_vec128 st0_ = v0__0; - Lib_IntVector_Intrinsics_vec128 st1_ = v1__0; - Lib_IntVector_Intrinsics_vec128 st2_ = v2__0; - Lib_IntVector_Intrinsics_vec128 st3_ = v3__0; - Lib_IntVector_Intrinsics_vec128 v0 = st[4U]; - Lib_IntVector_Intrinsics_vec128 v1 = st[5U]; - Lib_IntVector_Intrinsics_vec128 v2 = st[6U]; - Lib_IntVector_Intrinsics_vec128 v3 = st[7U]; - Lib_IntVector_Intrinsics_vec128 - v0_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v0, v1); - Lib_IntVector_Intrinsics_vec128 - v1_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v0, v1); - Lib_IntVector_Intrinsics_vec128 - v2_0 = Lib_IntVector_Intrinsics_vec128_interleave_low32(v2, v3); - Lib_IntVector_Intrinsics_vec128 - v3_0 = Lib_IntVector_Intrinsics_vec128_interleave_high32(v2, v3); - Lib_IntVector_Intrinsics_vec128 - v0__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v1__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec128 - v2__1 = Lib_IntVector_Intrinsics_vec128_interleave_low64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 - v3__1 = Lib_IntVector_Intrinsics_vec128_interleave_high64(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec128 v0__2 = v0__1; - Lib_IntVector_Intrinsics_vec128 v2__2 = v2__1; - Lib_IntVector_Intrinsics_vec128 v1__2 = v1__1; - Lib_IntVector_Intrinsics_vec128 v3__2 = v3__1; - Lib_IntVector_Intrinsics_vec128 st4_ = v0__2; - Lib_IntVector_Intrinsics_vec128 st5_ = v1__2; - Lib_IntVector_Intrinsics_vec128 st6_ = v2__2; - Lib_IntVector_Intrinsics_vec128 st7_ = v3__2; - uint8_t *b3; - uint8_t *b2; - uint8_t *b1; - uint8_t *b0; - st[0U] = st0_; - st[1U] = st4_; - st[2U] = st1_; - st[3U] = st5_; - st[4U] = st2_; - st[5U] = st6_; - st[6U] = st3_; - st[7U] = st7_; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, - st[i]);); - b3 = rb.snd.snd.snd; - b2 = rb.snd.snd.fst; - b1 = rb.snd.fst; - b0 = rb.fst; - memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t)); - memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t)); - memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * 
sizeof (uint8_t));
- memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
-}
-
diff --git a/dist/c89-compatible/Hacl_SHA2_Vec128.h b/dist/c89-compatible/Hacl_SHA2_Vec128.h
deleted file mode 100644
index f132901718..0000000000
--- a/dist/c89-compatible/Hacl_SHA2_Vec128.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_SHA2_Vec128_H
-#define __Hacl_SHA2_Vec128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_SHA2_Generic.h"
-#include "Hacl_Krmllib.h"
-#include "evercrypt_targetconfig.h"
-void
-Hacl_SHA2_Vec128_sha224_4(
-  uint8_t *dst0,
-  uint8_t *dst1,
-  uint8_t *dst2,
-  uint8_t *dst3,
-  uint32_t input_len,
-  uint8_t *input0,
-  uint8_t *input1,
-  uint8_t *input2,
-  uint8_t *input3
-);
-
-void
-Hacl_SHA2_Vec128_sha256_4(
-  uint8_t *dst0,
-  uint8_t *dst1,
-  uint8_t *dst2,
-  uint8_t *dst3,
-  uint32_t input_len,
-  uint8_t *input0,
-  uint8_t *input1,
-  uint8_t *input2,
-  uint8_t *input3
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_SHA2_Vec128_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_SHA2_Vec256.c b/dist/c89-compatible/Hacl_SHA2_Vec256.c
deleted file mode 100644
index 37720a72f4..0000000000
--- a/dist/c89-compatible/Hacl_SHA2_Vec256.c
+++ /dev/null
@@ -1,3190 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
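
For reference, the two prototypes in the deleted Hacl_SHA2_Vec128.h above were the public entry points of this module: one call hashes four equal-length inputs in lock-step, one 64-byte block per message per update, and writes four independent digests. A minimal caller sketch, assuming only the signature shown in the header (sha256_x4 and its buffers are hypothetical):

    #include <stdint.h>
    #include "Hacl_SHA2_Vec128.h"

    /* Hash four same-length messages in one 4-way pass; each digest is 32 bytes. */
    static void sha256_x4(uint8_t *m0, uint8_t *m1, uint8_t *m2, uint8_t *m3,
                          uint32_t len, uint8_t digests[4][32])
    {
      Hacl_SHA2_Vec128_sha256_4(digests[0], digests[1], digests[2], digests[3],
                                len, m0, m1, m2, m3);
    }
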
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_SHA2_Vec256.h" - -#include "internal/Hacl_SHA2_Types.h" -#include "libintvector.h" -static inline void -sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p block, Lib_IntVector_Intrinsics_vec256 *hash) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b7; - uint8_t *b6; - uint8_t *b5; - uint8_t *b4; - uint8_t *b3; - uint8_t *b2; - uint8_t *b10; - uint8_t *b00; - Lib_IntVector_Intrinsics_vec256 v00; - Lib_IntVector_Intrinsics_vec256 v10; - Lib_IntVector_Intrinsics_vec256 v20; - Lib_IntVector_Intrinsics_vec256 v30; - Lib_IntVector_Intrinsics_vec256 v40; - Lib_IntVector_Intrinsics_vec256 v50; - Lib_IntVector_Intrinsics_vec256 v60; - Lib_IntVector_Intrinsics_vec256 v70; - Lib_IntVector_Intrinsics_vec256 v0_; - Lib_IntVector_Intrinsics_vec256 v1_; - Lib_IntVector_Intrinsics_vec256 v2_; - Lib_IntVector_Intrinsics_vec256 v3_; - Lib_IntVector_Intrinsics_vec256 v4_; - Lib_IntVector_Intrinsics_vec256 v5_; - Lib_IntVector_Intrinsics_vec256 v6_; - Lib_IntVector_Intrinsics_vec256 v7_; - Lib_IntVector_Intrinsics_vec256 v0_0; - Lib_IntVector_Intrinsics_vec256 v1_0; - Lib_IntVector_Intrinsics_vec256 v2_0; - Lib_IntVector_Intrinsics_vec256 v3_0; - Lib_IntVector_Intrinsics_vec256 v4_0; - Lib_IntVector_Intrinsics_vec256 v5_0; - Lib_IntVector_Intrinsics_vec256 v6_0; - Lib_IntVector_Intrinsics_vec256 v7_0; - Lib_IntVector_Intrinsics_vec256 v0_1; - Lib_IntVector_Intrinsics_vec256 v2_1; - Lib_IntVector_Intrinsics_vec256 v1_1; - Lib_IntVector_Intrinsics_vec256 v3_1; - Lib_IntVector_Intrinsics_vec256 v4_1; - Lib_IntVector_Intrinsics_vec256 v6_1; - Lib_IntVector_Intrinsics_vec256 v5_1; - Lib_IntVector_Intrinsics_vec256 v7_1; - Lib_IntVector_Intrinsics_vec256 v0_10; - Lib_IntVector_Intrinsics_vec256 v1_10; - Lib_IntVector_Intrinsics_vec256 v2_10; - Lib_IntVector_Intrinsics_vec256 v3_10; - Lib_IntVector_Intrinsics_vec256 v4_10; - Lib_IntVector_Intrinsics_vec256 v5_10; - Lib_IntVector_Intrinsics_vec256 v6_10; - Lib_IntVector_Intrinsics_vec256 v7_10; - Lib_IntVector_Intrinsics_vec256 v0_2; - Lib_IntVector_Intrinsics_vec256 v4_2; - Lib_IntVector_Intrinsics_vec256 v1_2; - Lib_IntVector_Intrinsics_vec256 v5_2; - Lib_IntVector_Intrinsics_vec256 v2_2; - Lib_IntVector_Intrinsics_vec256 v6_2; - Lib_IntVector_Intrinsics_vec256 v3_2; - Lib_IntVector_Intrinsics_vec256 v7_2; - Lib_IntVector_Intrinsics_vec256 v0_20; - Lib_IntVector_Intrinsics_vec256 v1_20; - Lib_IntVector_Intrinsics_vec256 v2_20; - Lib_IntVector_Intrinsics_vec256 v3_20; - Lib_IntVector_Intrinsics_vec256 v4_20; - Lib_IntVector_Intrinsics_vec256 v5_20; - Lib_IntVector_Intrinsics_vec256 v6_20; - Lib_IntVector_Intrinsics_vec256 v7_20; - Lib_IntVector_Intrinsics_vec256 v0_3; - Lib_IntVector_Intrinsics_vec256 v1_3; - Lib_IntVector_Intrinsics_vec256 v2_3; - Lib_IntVector_Intrinsics_vec256 v3_3; - Lib_IntVector_Intrinsics_vec256 v4_3; - Lib_IntVector_Intrinsics_vec256 v5_3; - Lib_IntVector_Intrinsics_vec256 v6_3; - Lib_IntVector_Intrinsics_vec256 v7_3; - Lib_IntVector_Intrinsics_vec256 ws0; - Lib_IntVector_Intrinsics_vec256 ws1; - Lib_IntVector_Intrinsics_vec256 ws2; - Lib_IntVector_Intrinsics_vec256 ws3; - 
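
The interleave_low/high chains that follow (at 32-, then 64-, then 128-bit granularity) implement an 8x8 transpose of 32-bit words: after the sixteen load32_be calls, ws[i] holds eight consecutive big-endian words of message i's block, and the network permutes them so that ws[j] holds word j from every message, one message per lane, which is what the vectorized rounds consume. A scalar model of the net permutation (vec256_model is a stand-in for Lib_IntVector_Intrinsics_vec256, not HACL* code):

    #include <stdint.h>
    #include <string.h>

    typedef struct { uint32_t lane[8]; } vec256_model; /* one vec256 = 8 x 32-bit lanes */

    /* Net effect of one interleave network: out[i].lane[j] = in[j].lane[i]. */
    static void transpose8x8(vec256_model ws[8])
    {
      vec256_model out[8];
      int i, j;
      for (i = 0; i < 8; i++)
        for (j = 0; j < 8; j++)
          out[i].lane[j] = ws[j].lane[i];
      memcpy(ws, out, sizeof out);
    }

The same network runs twice per block (once for ws[0..7], once for ws[8..15]) and once more, in reverse, on the state vectors when the digests are extracted.
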
Lib_IntVector_Intrinsics_vec256 ws4; - Lib_IntVector_Intrinsics_vec256 ws5; - Lib_IntVector_Intrinsics_vec256 ws6; - Lib_IntVector_Intrinsics_vec256 ws7; - Lib_IntVector_Intrinsics_vec256 v0; - Lib_IntVector_Intrinsics_vec256 v1; - Lib_IntVector_Intrinsics_vec256 v2; - Lib_IntVector_Intrinsics_vec256 v3; - Lib_IntVector_Intrinsics_vec256 v4; - Lib_IntVector_Intrinsics_vec256 v5; - Lib_IntVector_Intrinsics_vec256 v6; - Lib_IntVector_Intrinsics_vec256 v7; - Lib_IntVector_Intrinsics_vec256 v0_4; - Lib_IntVector_Intrinsics_vec256 v1_4; - Lib_IntVector_Intrinsics_vec256 v2_4; - Lib_IntVector_Intrinsics_vec256 v3_4; - Lib_IntVector_Intrinsics_vec256 v4_4; - Lib_IntVector_Intrinsics_vec256 v5_4; - Lib_IntVector_Intrinsics_vec256 v6_4; - Lib_IntVector_Intrinsics_vec256 v7_4; - Lib_IntVector_Intrinsics_vec256 v0_5; - Lib_IntVector_Intrinsics_vec256 v1_5; - Lib_IntVector_Intrinsics_vec256 v2_5; - Lib_IntVector_Intrinsics_vec256 v3_5; - Lib_IntVector_Intrinsics_vec256 v4_5; - Lib_IntVector_Intrinsics_vec256 v5_5; - Lib_IntVector_Intrinsics_vec256 v6_5; - Lib_IntVector_Intrinsics_vec256 v7_5; - Lib_IntVector_Intrinsics_vec256 v0_11; - Lib_IntVector_Intrinsics_vec256 v2_11; - Lib_IntVector_Intrinsics_vec256 v1_11; - Lib_IntVector_Intrinsics_vec256 v3_11; - Lib_IntVector_Intrinsics_vec256 v4_11; - Lib_IntVector_Intrinsics_vec256 v6_11; - Lib_IntVector_Intrinsics_vec256 v5_11; - Lib_IntVector_Intrinsics_vec256 v7_11; - Lib_IntVector_Intrinsics_vec256 v0_12; - Lib_IntVector_Intrinsics_vec256 v1_12; - Lib_IntVector_Intrinsics_vec256 v2_12; - Lib_IntVector_Intrinsics_vec256 v3_12; - Lib_IntVector_Intrinsics_vec256 v4_12; - Lib_IntVector_Intrinsics_vec256 v5_12; - Lib_IntVector_Intrinsics_vec256 v6_12; - Lib_IntVector_Intrinsics_vec256 v7_12; - Lib_IntVector_Intrinsics_vec256 v0_21; - Lib_IntVector_Intrinsics_vec256 v4_21; - Lib_IntVector_Intrinsics_vec256 v1_21; - Lib_IntVector_Intrinsics_vec256 v5_21; - Lib_IntVector_Intrinsics_vec256 v2_21; - Lib_IntVector_Intrinsics_vec256 v6_21; - Lib_IntVector_Intrinsics_vec256 v3_21; - Lib_IntVector_Intrinsics_vec256 v7_21; - Lib_IntVector_Intrinsics_vec256 v0_22; - Lib_IntVector_Intrinsics_vec256 v1_22; - Lib_IntVector_Intrinsics_vec256 v2_22; - Lib_IntVector_Intrinsics_vec256 v3_22; - Lib_IntVector_Intrinsics_vec256 v4_22; - Lib_IntVector_Intrinsics_vec256 v5_22; - Lib_IntVector_Intrinsics_vec256 v6_22; - Lib_IntVector_Intrinsics_vec256 v7_22; - Lib_IntVector_Intrinsics_vec256 v0_6; - Lib_IntVector_Intrinsics_vec256 v1_6; - Lib_IntVector_Intrinsics_vec256 v2_6; - Lib_IntVector_Intrinsics_vec256 v3_6; - Lib_IntVector_Intrinsics_vec256 v4_6; - Lib_IntVector_Intrinsics_vec256 v5_6; - Lib_IntVector_Intrinsics_vec256 v6_6; - Lib_IntVector_Intrinsics_vec256 v7_6; - Lib_IntVector_Intrinsics_vec256 ws8; - Lib_IntVector_Intrinsics_vec256 ws9; - Lib_IntVector_Intrinsics_vec256 ws10; - Lib_IntVector_Intrinsics_vec256 ws11; - Lib_IntVector_Intrinsics_vec256 ws12; - Lib_IntVector_Intrinsics_vec256 ws13; - Lib_IntVector_Intrinsics_vec256 ws14; - Lib_IntVector_Intrinsics_vec256 ws15; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256)); - b7 = block.snd.snd.snd.snd.snd.snd.snd; - b6 = block.snd.snd.snd.snd.snd.snd.fst; - b5 = block.snd.snd.snd.snd.snd.fst; - b4 = block.snd.snd.snd.snd.fst; - b3 = block.snd.snd.snd.fst; - b2 = block.snd.snd.fst; - b10 = block.snd.fst; - b00 = block.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10); - ws[2U] = 
Lib_IntVector_Intrinsics_vec256_load32_be(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U); - v00 = ws[0U]; - v10 = ws[1U]; - v20 = ws[2U]; - v30 = ws[3U]; - v40 = ws[4U]; - v50 = ws[5U]; - v60 = ws[6U]; - v70 = ws[7U]; - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v10); - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v10); - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30); - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30); - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50); - v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50); - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70); - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70); - v0_0 = v0_; - v1_0 = v1_; - v2_0 = v2_; - v3_0 = v3_; - v4_0 = v4_; - v5_0 = v5_; - v6_0 = v6_; - v7_0 = v7_; - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0); - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0); - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0); - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0); - v4_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0); - v6_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0); - v5_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0); - v7_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0); - v0_10 = v0_1; - v1_10 = v1_1; - v2_10 = v2_1; - v3_10 = v3_1; - v4_10 = v4_1; - v5_10 = v5_1; - v6_10 = v6_1; - v7_10 = v7_1; - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v4_10); - v4_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v4_10); - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v5_10); - v5_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v5_10); - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, v6_10); - v6_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, v6_10); - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, v7_10); - v7_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, v7_10); - v0_20 = v0_2; - v1_20 = v1_2; - v2_20 = v2_2; - v3_20 = v3_2; - v4_20 = v4_2; - v5_20 = v5_2; - v6_20 = v6_2; - v7_20 = v7_2; - v0_3 = v0_20; - v1_3 = v1_20; - v2_3 = v2_20; - v3_3 = v3_20; - v4_3 = v4_20; - v5_3 = v5_20; - v6_3 = v6_20; - v7_3 = v7_20; - ws0 = v0_3; - ws1 = v2_3; - ws2 = v1_3; - ws3 = v3_3; - ws4 = v4_3; - ws5 = v6_3; - ws6 = v5_3; - ws7 = v7_3; - v0 = ws[8U]; - v1 = ws[9U]; - v2 = ws[10U]; - v3 = ws[11U]; - v4 = ws[12U]; - v5 = ws[13U]; - v6 = ws[14U]; - v7 = ws[15U]; - v0_4 = 
Lib_IntVector_Intrinsics_vec256_interleave_low32(v0, v1); - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v0, v1); - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v2, v3); - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v2, v3); - v4_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v4, v5); - v5_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v4, v5); - v6_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v6, v7); - v7_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v6, v7); - v0_5 = v0_4; - v1_5 = v1_4; - v2_5 = v2_4; - v3_5 = v3_4; - v4_5 = v4_4; - v5_5 = v5_4; - v6_5 = v6_4; - v7_5 = v7_4; - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_5, v2_5); - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_5, v2_5); - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_5, v3_5); - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_5, v3_5); - v4_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_5, v6_5); - v6_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_5, v6_5); - v5_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_5, v7_5); - v7_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_5, v7_5); - v0_12 = v0_11; - v1_12 = v1_11; - v2_12 = v2_11; - v3_12 = v3_11; - v4_12 = v4_11; - v5_12 = v5_11; - v6_12 = v6_11; - v7_12 = v7_11; - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v4_12); - v4_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v4_12); - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v5_12); - v5_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v5_12); - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_12, v6_12); - v6_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_12, v6_12); - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_12, v7_12); - v7_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_12, v7_12); - v0_22 = v0_21; - v1_22 = v1_21; - v2_22 = v2_21; - v3_22 = v3_21; - v4_22 = v4_21; - v5_22 = v5_21; - v6_22 = v6_21; - v7_22 = v7_21; - v0_6 = v0_22; - v1_6 = v1_22; - v2_6 = v2_22; - v3_6 = v3_22; - v4_6 = v4_22; - v5_6 = v5_22; - v6_6 = v6_22; - v7_6 = v7_22; - ws8 = v0_6; - ws9 = v2_6; - ws10 = v1_6; - ws11 = v3_6; - ws12 = v4_6; - ws13 = v6_6; - ws14 = v5_6; - ws15 = v7_6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i]; - Lib_IntVector_Intrinsics_vec256 ws_t = ws[i]; - Lib_IntVector_Intrinsics_vec256 a0 = hash[0U]; - Lib_IntVector_Intrinsics_vec256 b0 = hash[1U]; - Lib_IntVector_Intrinsics_vec256 c0 = hash[2U]; - Lib_IntVector_Intrinsics_vec256 d0 = hash[3U]; - Lib_IntVector_Intrinsics_vec256 e0 = hash[4U]; - Lib_IntVector_Intrinsics_vec256 f0 = hash[5U]; - Lib_IntVector_Intrinsics_vec256 g0 = hash[6U]; - Lib_IntVector_Intrinsics_vec256 h02 = hash[7U]; - Lib_IntVector_Intrinsics_vec256 k_e_t = Lib_IntVector_Intrinsics_vec256_load32(k_t); - Lib_IntVector_Intrinsics_vec256 - t1 = - 
Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02, - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, - (uint32_t)6U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, - (uint32_t)11U), - Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0), - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))), - k_e_t), - ws_t); - Lib_IntVector_Intrinsics_vec256 - t2 = - Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, - (uint32_t)2U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, - (uint32_t)13U), - Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0), - Lib_IntVector_Intrinsics_vec256_and(b0, c0)))); - Lib_IntVector_Intrinsics_vec256 a1 = Lib_IntVector_Intrinsics_vec256_add32(t1, t2); - Lib_IntVector_Intrinsics_vec256 b1 = a0; - Lib_IntVector_Intrinsics_vec256 c1 = b0; - Lib_IntVector_Intrinsics_vec256 d1 = c0; - Lib_IntVector_Intrinsics_vec256 e1 = Lib_IntVector_Intrinsics_vec256_add32(d0, t1); - Lib_IntVector_Intrinsics_vec256 f1 = e0; - Lib_IntVector_Intrinsics_vec256 g1 = f0; - Lib_IntVector_Intrinsics_vec256 h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)3U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 t16 = ws[i]; - Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 - s1 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2, - (uint32_t)17U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2, - (uint32_t)19U), - Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U))); - Lib_IntVector_Intrinsics_vec256 - s0 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15, - (uint32_t)7U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15, - (uint32_t)18U), - Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U))); - ws[i] = - Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1, - t7), - s0), - t16);); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]); - os[i] = x;); -} - -void -Hacl_SHA2_Vec256_sha224_8( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint8_t *dst4, - uint8_t *dst5, - uint8_t *dst6, - uint8_t *dst7, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint8_t *input4, - uint8_t *input5, - uint8_t *input6, - uint8_t 
*input7 -) -{ - Hacl_Impl_SHA2_Types_uint8_8p ib; - ib.fst = input0; - ib.snd.fst = input1; - ib.snd.snd.fst = input2; - ib.snd.snd.snd.fst = input3; - ib.snd.snd.snd.snd.fst = input4; - ib.snd.snd.snd.snd.snd.fst = input5; - ib.snd.snd.snd.snd.snd.snd.fst = input6; - ib.snd.snd.snd.snd.snd.snd.snd = input7; - { - Hacl_Impl_SHA2_Types_uint8_8p rb; - rb.fst = dst0; - rb.snd.fst = dst1; - rb.snd.snd.fst = dst2; - rb.snd.snd.snd.fst = dst3; - rb.snd.snd.snd.snd.fst = dst4; - rb.snd.snd.snd.snd.snd.fst = dst5; - rb.snd.snd.snd.snd.snd.snd.fst = dst6; - rb.snd.snd.snd.snd.snd.snd.snd = dst7; - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rem; - uint64_t len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b70; - uint8_t *b60; - uint8_t *b50; - uint8_t *b40; - uint8_t *b30; - uint8_t *b20; - uint8_t *b10; - uint8_t *b00; - uint8_t *bl0; - uint8_t *bl10; - uint8_t *bl20; - uint8_t *bl30; - uint8_t *bl40; - uint8_t *bl50; - uint8_t *bl60; - uint8_t *bl70; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = st; - uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i]; - Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi); - os[i] = x;); - rem = input_len % (uint32_t)64U; - len_ = (uint64_t)input_len; - blocks0 = input_len / (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd; - uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst; - uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst; - uint8_t *b4 = ib.snd.snd.snd.snd.fst; - uint8_t *b3 = ib.snd.snd.snd.fst; - uint8_t *b2 = ib.snd.snd.fst; - uint8_t *b1 = ib.snd.fst; - uint8_t *b0 = ib.fst; - uint8_t *bl00 = b0 + i * (uint32_t)64U; - uint8_t *bl1 = b1 + i * (uint32_t)64U; - uint8_t *bl2 = b2 + i * (uint32_t)64U; - uint8_t *bl3 = b3 + i * (uint32_t)64U; - uint8_t *bl4 = b4 + i * (uint32_t)64U; - uint8_t *bl5 = b5 + i * (uint32_t)64U; - uint8_t *bl6 = b6 + i * (uint32_t)64U; - uint8_t *bl7 = b7 + i * (uint32_t)64U; - Hacl_Impl_SHA2_Types_uint8_8p lit; - lit.fst = bl00; - lit.snd.fst = bl1; - lit.snd.snd.fst = bl2; - lit.snd.snd.snd.fst = bl3; - lit.snd.snd.snd.snd.fst = bl4; - lit.snd.snd.snd.snd.snd.fst = bl5; - lit.snd.snd.snd.snd.snd.snd.fst = bl6; - lit.snd.snd.snd.snd.snd.snd.snd = bl7; - { - Hacl_Impl_SHA2_Types_uint8_8p mb = lit; - sha224_update8(mb, st); - } - } - } - rem1 = input_len % (uint32_t)64U; - b70 = ib.snd.snd.snd.snd.snd.snd.snd; - b60 = ib.snd.snd.snd.snd.snd.snd.fst; - b50 = ib.snd.snd.snd.snd.snd.fst; - b40 = ib.snd.snd.snd.snd.fst; - b30 = ib.snd.snd.snd.fst; - b20 = ib.snd.snd.fst; - b10 = ib.snd.fst; - b00 = ib.fst; - bl0 = b00 + input_len - rem1; - bl10 = b10 + input_len - rem1; - bl20 = b20 + input_len - rem1; - bl30 = b30 + input_len - rem1; - bl40 = b40 + input_len - rem1; - bl50 = b50 + input_len - rem1; - bl60 = b60 + input_len - rem1; - bl70 = b70 + input_len - rem1; - { - Hacl_Impl_SHA2_Types_uint8_8p lit0; - Hacl_Impl_SHA2_Types_uint8_8p lb; - lit0.fst = bl0; - lit0.snd.fst = bl10; - lit0.snd.snd.fst = bl20; - lit0.snd.snd.snd.fst = bl30; - lit0.snd.snd.snd.snd.fst = bl40; - lit0.snd.snd.snd.snd.snd.fst = bl50; - lit0.snd.snd.snd.snd.snd.snd.fst = bl60; - lit0.snd.snd.snd.snd.snd.snd.snd = bl70; - lb = lit0; - { - uint32_t blocks; - if (rem + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)64U; - uint8_t last[1024U] = { 
0U }; - uint8_t totlen_buf[8U] = { 0U }; - uint64_t total_len_bits = len_ << (uint32_t)3U; - uint8_t *b71; - uint8_t *b61; - uint8_t *b51; - uint8_t *b41; - uint8_t *b31; - uint8_t *b21; - uint8_t *b11; - uint8_t *b01; - uint8_t *last00; - uint8_t *last10; - uint8_t *last2; - uint8_t *last3; - uint8_t *last4; - uint8_t *last5; - uint8_t *last6; - uint8_t *last7; - uint8_t *last010; - uint8_t *last110; - store64_be(totlen_buf, total_len_bits); - b71 = lb.snd.snd.snd.snd.snd.snd.snd; - b61 = lb.snd.snd.snd.snd.snd.snd.fst; - b51 = lb.snd.snd.snd.snd.snd.fst; - b41 = lb.snd.snd.snd.snd.fst; - b31 = lb.snd.snd.snd.fst; - b21 = lb.snd.snd.fst; - b11 = lb.snd.fst; - b01 = lb.fst; - last00 = last; - last10 = last + (uint32_t)128U; - last2 = last + (uint32_t)256U; - last3 = last + (uint32_t)384U; - last4 = last + (uint32_t)512U; - last5 = last + (uint32_t)640U; - last6 = last + (uint32_t)768U; - last7 = last + (uint32_t)896U; - memcpy(last00, b01, rem * sizeof (uint8_t)); - last00[rem] = (uint8_t)0x80U; - memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last010 = last00; - last110 = last00 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit1; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l00; - uint8_t *l01; - uint8_t *last011; - uint8_t *last111; - lit1.fst = last010; - lit1.snd = last110; - scrut0 = lit1; - l00 = scrut0.fst; - l01 = scrut0.snd; - memcpy(last10, b11, rem * sizeof (uint8_t)); - last10[rem] = (uint8_t)0x80U; - memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last011 = last10; - last111 = last10 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit2; - Hacl_Impl_SHA2_Types_uint8_2p scrut1; - uint8_t *l10; - uint8_t *l11; - uint8_t *last012; - uint8_t *last112; - lit2.fst = last011; - lit2.snd = last111; - scrut1 = lit2; - l10 = scrut1.fst; - l11 = scrut1.snd; - memcpy(last2, b21, rem * sizeof (uint8_t)); - last2[rem] = (uint8_t)0x80U; - memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last012 = last2; - last112 = last2 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit3; - Hacl_Impl_SHA2_Types_uint8_2p scrut2; - uint8_t *l20; - uint8_t *l21; - uint8_t *last013; - uint8_t *last113; - lit3.fst = last012; - lit3.snd = last112; - scrut2 = lit3; - l20 = scrut2.fst; - l21 = scrut2.snd; - memcpy(last3, b31, rem * sizeof (uint8_t)); - last3[rem] = (uint8_t)0x80U; - memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last013 = last3; - last113 = last3 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit4; - Hacl_Impl_SHA2_Types_uint8_2p scrut3; - uint8_t *l30; - uint8_t *l31; - uint8_t *last014; - uint8_t *last114; - lit4.fst = last013; - lit4.snd = last113; - scrut3 = lit4; - l30 = scrut3.fst; - l31 = scrut3.snd; - memcpy(last4, b41, rem * sizeof (uint8_t)); - last4[rem] = (uint8_t)0x80U; - memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last014 = last4; - last114 = last4 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit5; - Hacl_Impl_SHA2_Types_uint8_2p scrut4; - uint8_t *l40; - uint8_t *l41; - uint8_t *last015; - uint8_t *last115; - lit5.fst = last014; - lit5.snd = last114; - scrut4 = lit5; - l40 = scrut4.fst; - l41 = scrut4.snd; - memcpy(last5, b51, rem * sizeof (uint8_t)); - last5[rem] = (uint8_t)0x80U; - memcpy(last5 + fin - (uint32_t)8U, - totlen_buf, - (uint32_t)8U * sizeof (uint8_t)); - last015 = last5; - last115 = last5 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit6; - 
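
The rem/blocks/fin computation above, and the memcpy / 0x80 / store64_be triple repeated for each of the eight lanes, are standard SHA-2 Merkle-Damgard padding: the trailing rem input bytes are followed by a mandatory 0x80 byte and the 64-bit big-endian message length in bits, spilling into a second 64-byte block whenever rem + 9 > 64. A per-lane scalar sketch (pad_last is a hypothetical helper; store64_be is the same krml primitive the code above uses):

    #include <stdint.h>
    #include <string.h>
    #include "krml/lowstar_endianness.h" /* store64_be */

    /* Pad the trailing `rem` bytes of a total_len-byte message into
       one or two 64-byte blocks; returns the block count (1 or 2). */
    static uint32_t pad_last(uint8_t last[128], const uint8_t *tail,
                             uint32_t rem, uint64_t total_len)
    {
      uint32_t blocks = (rem + 8U + 1U <= 64U) ? 1U : 2U;
      uint32_t fin = blocks * 64U;
      memset(last, 0, 128U);
      memcpy(last, tail, rem);
      last[rem] = 0x80U;                            /* terminator bit */
      store64_be(last + fin - 8U, total_len << 3U); /* length in bits */
      return blocks;
    }
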
Hacl_Impl_SHA2_Types_uint8_2p scrut5; - uint8_t *l50; - uint8_t *l51; - uint8_t *last016; - uint8_t *last116; - lit6.fst = last015; - lit6.snd = last115; - scrut5 = lit6; - l50 = scrut5.fst; - l51 = scrut5.snd; - memcpy(last6, b61, rem * sizeof (uint8_t)); - last6[rem] = (uint8_t)0x80U; - memcpy(last6 + fin - (uint32_t)8U, - totlen_buf, - (uint32_t)8U * sizeof (uint8_t)); - last016 = last6; - last116 = last6 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit7; - Hacl_Impl_SHA2_Types_uint8_2p scrut6; - uint8_t *l60; - uint8_t *l61; - uint8_t *last01; - uint8_t *last11; - lit7.fst = last016; - lit7.snd = last116; - scrut6 = lit7; - l60 = scrut6.fst; - l61 = scrut6.snd; - memcpy(last7, b71, rem * sizeof (uint8_t)); - last7[rem] = (uint8_t)0x80U; - memcpy(last7 + fin - (uint32_t)8U, - totlen_buf, - (uint32_t)8U * sizeof (uint8_t)); - last01 = last7; - last11 = last7 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit8; - Hacl_Impl_SHA2_Types_uint8_2p scrut7; - uint8_t *l70; - uint8_t *l71; - lit8.fst = last01; - lit8.snd = last11; - scrut7 = lit8; - l70 = scrut7.fst; - l71 = scrut7.snd; - { - Hacl_Impl_SHA2_Types_uint8_8p mb0; - mb0.fst = l00; - mb0.snd.fst = l10; - mb0.snd.snd.fst = l20; - mb0.snd.snd.snd.fst = l30; - mb0.snd.snd.snd.snd.fst = l40; - mb0.snd.snd.snd.snd.snd.fst = l50; - mb0.snd.snd.snd.snd.snd.snd.fst = l60; - mb0.snd.snd.snd.snd.snd.snd.snd = l70; - { - Hacl_Impl_SHA2_Types_uint8_8p mb1; - mb1.fst = l01; - mb1.snd.fst = l11; - mb1.snd.snd.fst = l21; - mb1.snd.snd.snd.fst = l31; - mb1.snd.snd.snd.snd.fst = l41; - mb1.snd.snd.snd.snd.snd.fst = l51; - mb1.snd.snd.snd.snd.snd.snd.fst = l61; - mb1.snd.snd.snd.snd.snd.snd.snd = l71; - { - Hacl_Impl_SHA2_Types_uint8_2x8p lit; - Hacl_Impl_SHA2_Types_uint8_2x8p scrut; - Hacl_Impl_SHA2_Types_uint8_8p last0; - Hacl_Impl_SHA2_Types_uint8_8p last1; - lit.fst = mb0; - lit.snd = mb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha224_update8(last0, st); - if (blocks > (uint32_t)1U) - { - sha224_update8(last1, st); - } - { - uint8_t hbuf[256U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 v0 = st[0U]; - Lib_IntVector_Intrinsics_vec256 v1 = st[1U]; - Lib_IntVector_Intrinsics_vec256 v2 = st[2U]; - Lib_IntVector_Intrinsics_vec256 v3 = st[3U]; - Lib_IntVector_Intrinsics_vec256 v4 = st[4U]; - Lib_IntVector_Intrinsics_vec256 v5 = st[5U]; - Lib_IntVector_Intrinsics_vec256 v6 = st[6U]; - Lib_IntVector_Intrinsics_vec256 v7 = st[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v4, v5); - Lib_IntVector_Intrinsics_vec256 - v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v4, v5); - Lib_IntVector_Intrinsics_vec256 - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v6, v7); - Lib_IntVector_Intrinsics_vec256 - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v6, v7); - Lib_IntVector_Intrinsics_vec256 v0_0 = v0_; - Lib_IntVector_Intrinsics_vec256 v1_0 = v1_; - Lib_IntVector_Intrinsics_vec256 v2_0 = v2_; - Lib_IntVector_Intrinsics_vec256 v3_0 = v3_; - Lib_IntVector_Intrinsics_vec256 v4_0 = v4_; - Lib_IntVector_Intrinsics_vec256 v5_0 = v5_; - Lib_IntVector_Intrinsics_vec256 
v6_0 = v6_; - Lib_IntVector_Intrinsics_vec256 v7_0 = v7_; - Lib_IntVector_Intrinsics_vec256 - v0_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, - v2_0); - Lib_IntVector_Intrinsics_vec256 - v2_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, - v2_0); - Lib_IntVector_Intrinsics_vec256 - v1_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, - v3_0); - Lib_IntVector_Intrinsics_vec256 - v3_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, - v3_0); - Lib_IntVector_Intrinsics_vec256 - v4_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, - v6_0); - Lib_IntVector_Intrinsics_vec256 - v6_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, - v6_0); - Lib_IntVector_Intrinsics_vec256 - v5_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, - v7_0); - Lib_IntVector_Intrinsics_vec256 - v7_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, - v7_0); - Lib_IntVector_Intrinsics_vec256 v0_10 = v0_1; - Lib_IntVector_Intrinsics_vec256 v1_10 = v1_1; - Lib_IntVector_Intrinsics_vec256 v2_10 = v2_1; - Lib_IntVector_Intrinsics_vec256 v3_10 = v3_1; - Lib_IntVector_Intrinsics_vec256 v4_10 = v4_1; - Lib_IntVector_Intrinsics_vec256 v5_10 = v5_1; - Lib_IntVector_Intrinsics_vec256 v6_10 = v6_1; - Lib_IntVector_Intrinsics_vec256 v7_10 = v7_1; - Lib_IntVector_Intrinsics_vec256 - v0_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, - v4_10); - Lib_IntVector_Intrinsics_vec256 - v4_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, - v4_10); - Lib_IntVector_Intrinsics_vec256 - v1_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, - v5_10); - Lib_IntVector_Intrinsics_vec256 - v5_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, - v5_10); - Lib_IntVector_Intrinsics_vec256 - v2_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, - v6_10); - Lib_IntVector_Intrinsics_vec256 - v6_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, - v6_10); - Lib_IntVector_Intrinsics_vec256 - v3_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, - v7_10); - Lib_IntVector_Intrinsics_vec256 - v7_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, - v7_10); - Lib_IntVector_Intrinsics_vec256 v0_20 = v0_2; - Lib_IntVector_Intrinsics_vec256 v1_20 = v1_2; - Lib_IntVector_Intrinsics_vec256 v2_20 = v2_2; - Lib_IntVector_Intrinsics_vec256 v3_20 = v3_2; - Lib_IntVector_Intrinsics_vec256 v4_20 = v4_2; - Lib_IntVector_Intrinsics_vec256 v5_20 = v5_2; - Lib_IntVector_Intrinsics_vec256 v6_20 = v6_2; - Lib_IntVector_Intrinsics_vec256 v7_20 = v7_2; - Lib_IntVector_Intrinsics_vec256 v0_3 = v0_20; - Lib_IntVector_Intrinsics_vec256 v1_3 = v1_20; - Lib_IntVector_Intrinsics_vec256 v2_3 = v2_20; - Lib_IntVector_Intrinsics_vec256 v3_3 = v3_20; - Lib_IntVector_Intrinsics_vec256 v4_3 = v4_20; - Lib_IntVector_Intrinsics_vec256 v5_3 = v5_20; - Lib_IntVector_Intrinsics_vec256 v6_3 = v6_20; - Lib_IntVector_Intrinsics_vec256 v7_3 = v7_20; - Lib_IntVector_Intrinsics_vec256 st0_ = v0_3; - Lib_IntVector_Intrinsics_vec256 st1_ = v2_3; - Lib_IntVector_Intrinsics_vec256 st2_ = v1_3; - Lib_IntVector_Intrinsics_vec256 st3_ = v3_3; - Lib_IntVector_Intrinsics_vec256 st4_ = v4_3; - Lib_IntVector_Intrinsics_vec256 st5_ = v6_3; - Lib_IntVector_Intrinsics_vec256 st6_ = v5_3; - Lib_IntVector_Intrinsics_vec256 st7_ = v7_3; - uint8_t *b7; - uint8_t *b6; - uint8_t *b5; - uint8_t *b4; - uint8_t *b3; - uint8_t *b2; - uint8_t *b1; - uint8_t *b0; - st[0U] = st0_; - st[1U] = st1_; - st[2U] = st2_; - st[3U] = st3_; - 
st[4U] = st4_; - st[5U] = st5_; - st[6U] = st6_; - st[7U] = st7_; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256_store32_be(hbuf - + i * (uint32_t)32U, - st[i]);); - b7 = rb.snd.snd.snd.snd.snd.snd.snd; - b6 = rb.snd.snd.snd.snd.snd.snd.fst; - b5 = rb.snd.snd.snd.snd.snd.fst; - b4 = rb.snd.snd.snd.snd.fst; - b3 = rb.snd.snd.snd.fst; - b2 = rb.snd.snd.fst; - b1 = rb.snd.fst; - b0 = rb.fst; - memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t)); - memcpy(b1, - hbuf + (uint32_t)32U, - (uint32_t)28U * sizeof (uint8_t)); - memcpy(b2, - hbuf + (uint32_t)64U, - (uint32_t)28U * sizeof (uint8_t)); - memcpy(b3, - hbuf + (uint32_t)96U, - (uint32_t)28U * sizeof (uint8_t)); - memcpy(b4, - hbuf + (uint32_t)128U, - (uint32_t)28U * sizeof (uint8_t)); - memcpy(b5, - hbuf + (uint32_t)160U, - (uint32_t)28U * sizeof (uint8_t)); - memcpy(b6, - hbuf + (uint32_t)192U, - (uint32_t)28U * sizeof (uint8_t)); - memcpy(b7, - hbuf + (uint32_t)224U, - (uint32_t)28U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } -} - -static inline void -sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p block, Lib_IntVector_Intrinsics_vec256 *hash) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b7; - uint8_t *b6; - uint8_t *b5; - uint8_t *b4; - uint8_t *b3; - uint8_t *b2; - uint8_t *b10; - uint8_t *b00; - Lib_IntVector_Intrinsics_vec256 v00; - Lib_IntVector_Intrinsics_vec256 v10; - Lib_IntVector_Intrinsics_vec256 v20; - Lib_IntVector_Intrinsics_vec256 v30; - Lib_IntVector_Intrinsics_vec256 v40; - Lib_IntVector_Intrinsics_vec256 v50; - Lib_IntVector_Intrinsics_vec256 v60; - Lib_IntVector_Intrinsics_vec256 v70; - Lib_IntVector_Intrinsics_vec256 v0_; - Lib_IntVector_Intrinsics_vec256 v1_; - Lib_IntVector_Intrinsics_vec256 v2_; - Lib_IntVector_Intrinsics_vec256 v3_; - Lib_IntVector_Intrinsics_vec256 v4_; - Lib_IntVector_Intrinsics_vec256 v5_; - Lib_IntVector_Intrinsics_vec256 v6_; - Lib_IntVector_Intrinsics_vec256 v7_; - Lib_IntVector_Intrinsics_vec256 v0_0; - Lib_IntVector_Intrinsics_vec256 v1_0; - Lib_IntVector_Intrinsics_vec256 v2_0; - Lib_IntVector_Intrinsics_vec256 v3_0; - Lib_IntVector_Intrinsics_vec256 v4_0; - Lib_IntVector_Intrinsics_vec256 v5_0; - Lib_IntVector_Intrinsics_vec256 v6_0; - Lib_IntVector_Intrinsics_vec256 v7_0; - Lib_IntVector_Intrinsics_vec256 v0_1; - Lib_IntVector_Intrinsics_vec256 v2_1; - Lib_IntVector_Intrinsics_vec256 v1_1; - Lib_IntVector_Intrinsics_vec256 v3_1; - Lib_IntVector_Intrinsics_vec256 v4_1; - Lib_IntVector_Intrinsics_vec256 v6_1; - Lib_IntVector_Intrinsics_vec256 v5_1; - Lib_IntVector_Intrinsics_vec256 v7_1; - Lib_IntVector_Intrinsics_vec256 v0_10; - Lib_IntVector_Intrinsics_vec256 v1_10; - Lib_IntVector_Intrinsics_vec256 v2_10; - Lib_IntVector_Intrinsics_vec256 v3_10; - Lib_IntVector_Intrinsics_vec256 v4_10; - Lib_IntVector_Intrinsics_vec256 v5_10; - Lib_IntVector_Intrinsics_vec256 v6_10; - Lib_IntVector_Intrinsics_vec256 v7_10; - Lib_IntVector_Intrinsics_vec256 v0_2; - Lib_IntVector_Intrinsics_vec256 v4_2; - Lib_IntVector_Intrinsics_vec256 v1_2; - Lib_IntVector_Intrinsics_vec256 v5_2; - Lib_IntVector_Intrinsics_vec256 v2_2; - Lib_IntVector_Intrinsics_vec256 v6_2; - Lib_IntVector_Intrinsics_vec256 v3_2; - Lib_IntVector_Intrinsics_vec256 v7_2; - Lib_IntVector_Intrinsics_vec256 v0_20; - Lib_IntVector_Intrinsics_vec256 v1_20; - Lib_IntVector_Intrinsics_vec256 
v2_20; - Lib_IntVector_Intrinsics_vec256 v3_20; - Lib_IntVector_Intrinsics_vec256 v4_20; - Lib_IntVector_Intrinsics_vec256 v5_20; - Lib_IntVector_Intrinsics_vec256 v6_20; - Lib_IntVector_Intrinsics_vec256 v7_20; - Lib_IntVector_Intrinsics_vec256 v0_3; - Lib_IntVector_Intrinsics_vec256 v1_3; - Lib_IntVector_Intrinsics_vec256 v2_3; - Lib_IntVector_Intrinsics_vec256 v3_3; - Lib_IntVector_Intrinsics_vec256 v4_3; - Lib_IntVector_Intrinsics_vec256 v5_3; - Lib_IntVector_Intrinsics_vec256 v6_3; - Lib_IntVector_Intrinsics_vec256 v7_3; - Lib_IntVector_Intrinsics_vec256 ws0; - Lib_IntVector_Intrinsics_vec256 ws1; - Lib_IntVector_Intrinsics_vec256 ws2; - Lib_IntVector_Intrinsics_vec256 ws3; - Lib_IntVector_Intrinsics_vec256 ws4; - Lib_IntVector_Intrinsics_vec256 ws5; - Lib_IntVector_Intrinsics_vec256 ws6; - Lib_IntVector_Intrinsics_vec256 ws7; - Lib_IntVector_Intrinsics_vec256 v0; - Lib_IntVector_Intrinsics_vec256 v1; - Lib_IntVector_Intrinsics_vec256 v2; - Lib_IntVector_Intrinsics_vec256 v3; - Lib_IntVector_Intrinsics_vec256 v4; - Lib_IntVector_Intrinsics_vec256 v5; - Lib_IntVector_Intrinsics_vec256 v6; - Lib_IntVector_Intrinsics_vec256 v7; - Lib_IntVector_Intrinsics_vec256 v0_4; - Lib_IntVector_Intrinsics_vec256 v1_4; - Lib_IntVector_Intrinsics_vec256 v2_4; - Lib_IntVector_Intrinsics_vec256 v3_4; - Lib_IntVector_Intrinsics_vec256 v4_4; - Lib_IntVector_Intrinsics_vec256 v5_4; - Lib_IntVector_Intrinsics_vec256 v6_4; - Lib_IntVector_Intrinsics_vec256 v7_4; - Lib_IntVector_Intrinsics_vec256 v0_5; - Lib_IntVector_Intrinsics_vec256 v1_5; - Lib_IntVector_Intrinsics_vec256 v2_5; - Lib_IntVector_Intrinsics_vec256 v3_5; - Lib_IntVector_Intrinsics_vec256 v4_5; - Lib_IntVector_Intrinsics_vec256 v5_5; - Lib_IntVector_Intrinsics_vec256 v6_5; - Lib_IntVector_Intrinsics_vec256 v7_5; - Lib_IntVector_Intrinsics_vec256 v0_11; - Lib_IntVector_Intrinsics_vec256 v2_11; - Lib_IntVector_Intrinsics_vec256 v1_11; - Lib_IntVector_Intrinsics_vec256 v3_11; - Lib_IntVector_Intrinsics_vec256 v4_11; - Lib_IntVector_Intrinsics_vec256 v6_11; - Lib_IntVector_Intrinsics_vec256 v5_11; - Lib_IntVector_Intrinsics_vec256 v7_11; - Lib_IntVector_Intrinsics_vec256 v0_12; - Lib_IntVector_Intrinsics_vec256 v1_12; - Lib_IntVector_Intrinsics_vec256 v2_12; - Lib_IntVector_Intrinsics_vec256 v3_12; - Lib_IntVector_Intrinsics_vec256 v4_12; - Lib_IntVector_Intrinsics_vec256 v5_12; - Lib_IntVector_Intrinsics_vec256 v6_12; - Lib_IntVector_Intrinsics_vec256 v7_12; - Lib_IntVector_Intrinsics_vec256 v0_21; - Lib_IntVector_Intrinsics_vec256 v4_21; - Lib_IntVector_Intrinsics_vec256 v1_21; - Lib_IntVector_Intrinsics_vec256 v5_21; - Lib_IntVector_Intrinsics_vec256 v2_21; - Lib_IntVector_Intrinsics_vec256 v6_21; - Lib_IntVector_Intrinsics_vec256 v3_21; - Lib_IntVector_Intrinsics_vec256 v7_21; - Lib_IntVector_Intrinsics_vec256 v0_22; - Lib_IntVector_Intrinsics_vec256 v1_22; - Lib_IntVector_Intrinsics_vec256 v2_22; - Lib_IntVector_Intrinsics_vec256 v3_22; - Lib_IntVector_Intrinsics_vec256 v4_22; - Lib_IntVector_Intrinsics_vec256 v5_22; - Lib_IntVector_Intrinsics_vec256 v6_22; - Lib_IntVector_Intrinsics_vec256 v7_22; - Lib_IntVector_Intrinsics_vec256 v0_6; - Lib_IntVector_Intrinsics_vec256 v1_6; - Lib_IntVector_Intrinsics_vec256 v2_6; - Lib_IntVector_Intrinsics_vec256 v3_6; - Lib_IntVector_Intrinsics_vec256 v4_6; - Lib_IntVector_Intrinsics_vec256 v5_6; - Lib_IntVector_Intrinsics_vec256 v6_6; - Lib_IntVector_Intrinsics_vec256 v7_6; - Lib_IntVector_Intrinsics_vec256 ws8; - Lib_IntVector_Intrinsics_vec256 ws9; - Lib_IntVector_Intrinsics_vec256 ws10; - 
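
A reading aid for sha256_update8, whose declaration preamble this is: it is the same compression loop as sha224_update8 above, down to the shared Hacl_Impl_SHA2_Generic_k224_256 round constants. The two algorithms differ only in the initial state their wrappers load (h224 vs h256) and in how many bytes of the final 256-bit state are copied out (28 vs 32). A hypothetical summary, assuming the extern tables from Hacl_SHA2_Generic.h:

    #include <stdint.h>
    #include "Hacl_SHA2_Generic.h" /* Hacl_Impl_SHA2_Generic_h224 / _h256 */

    typedef struct { const uint32_t *iv; uint32_t digest_len; } sha2_variant;

    static sha2_variant variant_of(int is224)
    {
      sha2_variant v;
      v.iv = is224 ? Hacl_Impl_SHA2_Generic_h224 : Hacl_Impl_SHA2_Generic_h256;
      v.digest_len = is224 ? 28U : 32U; /* SHA-224 truncates the state */
      return v;
    }
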
Lib_IntVector_Intrinsics_vec256 ws11; - Lib_IntVector_Intrinsics_vec256 ws12; - Lib_IntVector_Intrinsics_vec256 ws13; - Lib_IntVector_Intrinsics_vec256 ws14; - Lib_IntVector_Intrinsics_vec256 ws15; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256)); - b7 = block.snd.snd.snd.snd.snd.snd.snd; - b6 = block.snd.snd.snd.snd.snd.snd.fst; - b5 = block.snd.snd.snd.snd.snd.fst; - b4 = block.snd.snd.snd.snd.fst; - b3 = block.snd.snd.snd.fst; - b2 = block.snd.snd.fst; - b10 = block.snd.fst; - b00 = block.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U); - v00 = ws[0U]; - v10 = ws[1U]; - v20 = ws[2U]; - v30 = ws[3U]; - v40 = ws[4U]; - v50 = ws[5U]; - v60 = ws[6U]; - v70 = ws[7U]; - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v00, v10); - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v00, v10); - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v20, v30); - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v20, v30); - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v40, v50); - v5_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v40, v50); - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v60, v70); - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v60, v70); - v0_0 = v0_; - v1_0 = v1_; - v2_0 = v2_; - v3_0 = v3_; - v4_0 = v4_; - v5_0 = v5_; - v6_0 = v6_; - v7_0 = v7_; - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, v2_0); - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, v2_0); - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, v3_0); - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, v3_0); - v4_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, v6_0); - v6_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, v6_0); - v5_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, v7_0); - v7_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, v7_0); - v0_10 = v0_1; - v1_10 = v1_1; - v2_10 = v2_1; - v3_10 = v3_1; - v4_10 = v4_1; - v5_10 = v5_1; - v6_10 = v6_1; - v7_10 = v7_1; - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, v4_10); - v4_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, v4_10); - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, v5_10); - v5_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, v5_10); - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, v6_10); - v6_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, v6_10); - v3_2 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, v7_10); - v7_2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, v7_10); - v0_20 = v0_2; - v1_20 = v1_2; - v2_20 = v2_2; - v3_20 = v3_2; - v4_20 = v4_2; - v5_20 = v5_2; - v6_20 = v6_2; - v7_20 = v7_2; - v0_3 = v0_20; - v1_3 = v1_20; - v2_3 = v2_20; - v3_3 = v3_20; - v4_3 = v4_20; - v5_3 = v5_20; - v6_3 = v6_20; - v7_3 = v7_20; - ws0 = v0_3; - ws1 = v2_3; - ws2 = v1_3; - ws3 = v3_3; - ws4 = v4_3; - ws5 = v6_3; - ws6 = v5_3; - ws7 = v7_3; - v0 = ws[8U]; - v1 = ws[9U]; - v2 = ws[10U]; - v3 = ws[11U]; - v4 = ws[12U]; - v5 = ws[13U]; - v6 = ws[14U]; - v7 = ws[15U]; - v0_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v0, v1); - v1_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v0, v1); - v2_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v2, v3); - v3_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v2, v3); - v4_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v4, v5); - v5_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v4, v5); - v6_4 = Lib_IntVector_Intrinsics_vec256_interleave_low32(v6, v7); - v7_4 = Lib_IntVector_Intrinsics_vec256_interleave_high32(v6, v7); - v0_5 = v0_4; - v1_5 = v1_4; - v2_5 = v2_4; - v3_5 = v3_4; - v4_5 = v4_4; - v5_5 = v5_4; - v6_5 = v6_4; - v7_5 = v7_4; - v0_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_5, v2_5); - v2_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_5, v2_5); - v1_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_5, v3_5); - v3_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_5, v3_5); - v4_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_5, v6_5); - v6_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_5, v6_5); - v5_11 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_5, v7_5); - v7_11 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_5, v7_5); - v0_12 = v0_11; - v1_12 = v1_11; - v2_12 = v2_11; - v3_12 = v3_11; - v4_12 = v4_11; - v5_12 = v5_11; - v6_12 = v6_11; - v7_12 = v7_11; - v0_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_12, v4_12); - v4_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_12, v4_12); - v1_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_12, v5_12); - v5_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_12, v5_12); - v2_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_12, v6_12); - v6_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_12, v6_12); - v3_21 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_12, v7_12); - v7_21 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_12, v7_12); - v0_22 = v0_21; - v1_22 = v1_21; - v2_22 = v2_21; - v3_22 = v3_21; - v4_22 = v4_21; - v5_22 = v5_21; - v6_22 = v6_21; - v7_22 = v7_21; - v0_6 = v0_22; - v1_6 = v1_22; - v2_6 = v2_22; - v3_6 = v3_22; - v4_6 = v4_22; - v5_6 = v5_22; - v6_6 = v6_22; - v7_6 = v7_22; - ws8 = v0_6; - ws9 = v2_6; - ws10 = v1_6; - ws11 = v3_6; - ws12 = v4_6; - ws13 = v6_6; - ws14 = v5_6; - ws15 = v7_6; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - KRML_MAYBE_FOR4(i0, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i]; - Lib_IntVector_Intrinsics_vec256 ws_t = ws[i]; - 
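
The nested add32/xor/rotate_right32 expressions that follow compute one SHA-256 round on all eight lanes at once. For a single lane, in scalar form (a standalone sketch, not the generated code):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, uint32_t n)
    {
      return (x >> n) | (x << (32U - n));
    }

    /* One SHA-256 round: the vec256 code below is this, eight lanes wide. */
    static void sha256_round(uint32_t h[8], uint32_t k_t, uint32_t ws_t)
    {
      uint32_t a = h[0], b = h[1], c = h[2], d = h[3];
      uint32_t e = h[4], f = h[5], g = h[6], hh = h[7];
      uint32_t t1 = hh
        + (rotr32(e, 6U) ^ rotr32(e, 11U) ^ rotr32(e, 25U)) /* Sigma1(e) */
        + ((e & f) ^ (~e & g))                              /* Ch(e,f,g) */
        + k_t + ws_t;
      uint32_t t2 = (rotr32(a, 2U) ^ rotr32(a, 13U) ^ rotr32(a, 22U)) /* Sigma0(a) */
        + ((a & b) ^ ((a & c) ^ (b & c)));                            /* Maj(a,b,c) */
      h[0] = t1 + t2; h[1] = a; h[2] = b; h[3] = c;
      h[4] = d + t1;  h[5] = e; h[6] = f; h[7] = g;
    }
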
Lib_IntVector_Intrinsics_vec256 a0 = hash[0U]; - Lib_IntVector_Intrinsics_vec256 b0 = hash[1U]; - Lib_IntVector_Intrinsics_vec256 c0 = hash[2U]; - Lib_IntVector_Intrinsics_vec256 d0 = hash[3U]; - Lib_IntVector_Intrinsics_vec256 e0 = hash[4U]; - Lib_IntVector_Intrinsics_vec256 f0 = hash[5U]; - Lib_IntVector_Intrinsics_vec256 g0 = hash[6U]; - Lib_IntVector_Intrinsics_vec256 h02 = hash[7U]; - Lib_IntVector_Intrinsics_vec256 k_e_t = Lib_IntVector_Intrinsics_vec256_load32(k_t); - Lib_IntVector_Intrinsics_vec256 - t1 = - Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02, - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, - (uint32_t)6U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, - (uint32_t)11U), - Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0), - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))), - k_e_t), - ws_t); - Lib_IntVector_Intrinsics_vec256 - t2 = - Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, - (uint32_t)2U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, - (uint32_t)13U), - Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0), - Lib_IntVector_Intrinsics_vec256_and(b0, c0)))); - Lib_IntVector_Intrinsics_vec256 a1 = Lib_IntVector_Intrinsics_vec256_add32(t1, t2); - Lib_IntVector_Intrinsics_vec256 b1 = a0; - Lib_IntVector_Intrinsics_vec256 c1 = b0; - Lib_IntVector_Intrinsics_vec256 d1 = c0; - Lib_IntVector_Intrinsics_vec256 e1 = Lib_IntVector_Intrinsics_vec256_add32(d0, t1); - Lib_IntVector_Intrinsics_vec256 f1 = e0; - Lib_IntVector_Intrinsics_vec256 g1 = f0; - Lib_IntVector_Intrinsics_vec256 h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)3U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 t16 = ws[i]; - Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 - s1 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2, - (uint32_t)17U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2, - (uint32_t)19U), - Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U))); - Lib_IntVector_Intrinsics_vec256 - s0 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15, - (uint32_t)7U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15, - (uint32_t)18U), - Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U))); - ws[i] = - Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1, - t7), - s0), - t16);); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - 
(uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]); - os[i] = x;); -} - -void -Hacl_SHA2_Vec256_sha256_8( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint8_t *dst4, - uint8_t *dst5, - uint8_t *dst6, - uint8_t *dst7, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint8_t *input4, - uint8_t *input5, - uint8_t *input6, - uint8_t *input7 -) -{ - Hacl_Impl_SHA2_Types_uint8_8p ib; - ib.fst = input0; - ib.snd.fst = input1; - ib.snd.snd.fst = input2; - ib.snd.snd.snd.fst = input3; - ib.snd.snd.snd.snd.fst = input4; - ib.snd.snd.snd.snd.snd.fst = input5; - ib.snd.snd.snd.snd.snd.snd.fst = input6; - ib.snd.snd.snd.snd.snd.snd.snd = input7; - { - Hacl_Impl_SHA2_Types_uint8_8p rb; - rb.fst = dst0; - rb.snd.fst = dst1; - rb.snd.snd.fst = dst2; - rb.snd.snd.snd.fst = dst3; - rb.snd.snd.snd.snd.fst = dst4; - rb.snd.snd.snd.snd.snd.fst = dst5; - rb.snd.snd.snd.snd.snd.snd.fst = dst6; - rb.snd.snd.snd.snd.snd.snd.snd = dst7; - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rem; - uint64_t len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b70; - uint8_t *b60; - uint8_t *b50; - uint8_t *b40; - uint8_t *b30; - uint8_t *b20; - uint8_t *b10; - uint8_t *b00; - uint8_t *bl0; - uint8_t *bl10; - uint8_t *bl20; - uint8_t *bl30; - uint8_t *bl40; - uint8_t *bl50; - uint8_t *bl60; - uint8_t *bl70; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = st; - uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i]; - Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi); - os[i] = x;); - rem = input_len % (uint32_t)64U; - len_ = (uint64_t)input_len; - blocks0 = input_len / (uint32_t)64U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd; - uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst; - uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst; - uint8_t *b4 = ib.snd.snd.snd.snd.fst; - uint8_t *b3 = ib.snd.snd.snd.fst; - uint8_t *b2 = ib.snd.snd.fst; - uint8_t *b1 = ib.snd.fst; - uint8_t *b0 = ib.fst; - uint8_t *bl00 = b0 + i * (uint32_t)64U; - uint8_t *bl1 = b1 + i * (uint32_t)64U; - uint8_t *bl2 = b2 + i * (uint32_t)64U; - uint8_t *bl3 = b3 + i * (uint32_t)64U; - uint8_t *bl4 = b4 + i * (uint32_t)64U; - uint8_t *bl5 = b5 + i * (uint32_t)64U; - uint8_t *bl6 = b6 + i * (uint32_t)64U; - uint8_t *bl7 = b7 + i * (uint32_t)64U; - Hacl_Impl_SHA2_Types_uint8_8p lit; - lit.fst = bl00; - lit.snd.fst = bl1; - lit.snd.snd.fst = bl2; - lit.snd.snd.snd.fst = bl3; - lit.snd.snd.snd.snd.fst = bl4; - lit.snd.snd.snd.snd.snd.fst = bl5; - lit.snd.snd.snd.snd.snd.snd.fst = bl6; - lit.snd.snd.snd.snd.snd.snd.snd = bl7; - { - Hacl_Impl_SHA2_Types_uint8_8p mb = lit; - sha256_update8(mb, st); - } - } - } - rem1 = input_len % (uint32_t)64U; - b70 = ib.snd.snd.snd.snd.snd.snd.snd; - b60 = ib.snd.snd.snd.snd.snd.snd.fst; - b50 = ib.snd.snd.snd.snd.snd.fst; - b40 = ib.snd.snd.snd.snd.fst; - b30 = ib.snd.snd.snd.fst; - b20 = ib.snd.snd.fst; - b10 = ib.snd.fst; - b00 = ib.fst; - bl0 = b00 + input_len - rem1; - bl10 = b10 + input_len - rem1; - bl20 = b20 + input_len - rem1; - bl30 = b30 + input_len - rem1; - bl40 = b40 + input_len - rem1; - bl50 = b50 + input_len - rem1; - bl60 = b60 + input_len - rem1; - bl70 = b70 + input_len - rem1; - { - Hacl_Impl_SHA2_Types_uint8_8p lit0; - 
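
The ws[(i + 1) % 16], ws[(i + 9) % 16] and ws[(i + 14) % 16] accesses in the `if (i0 < (uint32_t)3U)` block above keep the SHA-256 message schedule in a 16-entry ring buffer rather than materializing all 64 expanded words. One extension step per lane, in scalar form (a sketch, with rotr32 as in the round sketch above):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, uint32_t n)
    {
      return (x >> n) | (x << (32U - n));
    }

    /* W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16], in place. */
    static void schedule_step(uint32_t ws[16], uint32_t i)
    {
      uint32_t t16 = ws[i];
      uint32_t t15 = ws[(i + 1U) % 16U];
      uint32_t t7  = ws[(i + 9U) % 16U];
      uint32_t t2  = ws[(i + 14U) % 16U];
      uint32_t s1  = rotr32(t2, 17U) ^ rotr32(t2, 19U) ^ (t2 >> 10U);  /* sigma1 */
      uint32_t s0  = rotr32(t15, 7U) ^ rotr32(t15, 18U) ^ (t15 >> 3U); /* sigma0 */
      ws[i] = s1 + t7 + s0 + t16;
    }
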
Hacl_Impl_SHA2_Types_uint8_8p lb; - lit0.fst = bl0; - lit0.snd.fst = bl10; - lit0.snd.snd.fst = bl20; - lit0.snd.snd.snd.fst = bl30; - lit0.snd.snd.snd.snd.fst = bl40; - lit0.snd.snd.snd.snd.snd.fst = bl50; - lit0.snd.snd.snd.snd.snd.snd.fst = bl60; - lit0.snd.snd.snd.snd.snd.snd.snd = bl70; - lb = lit0; - { - uint32_t blocks; - if (rem + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)64U; - uint8_t last[1024U] = { 0U }; - uint8_t totlen_buf[8U] = { 0U }; - uint64_t total_len_bits = len_ << (uint32_t)3U; - uint8_t *b71; - uint8_t *b61; - uint8_t *b51; - uint8_t *b41; - uint8_t *b31; - uint8_t *b21; - uint8_t *b11; - uint8_t *b01; - uint8_t *last00; - uint8_t *last10; - uint8_t *last2; - uint8_t *last3; - uint8_t *last4; - uint8_t *last5; - uint8_t *last6; - uint8_t *last7; - uint8_t *last010; - uint8_t *last110; - store64_be(totlen_buf, total_len_bits); - b71 = lb.snd.snd.snd.snd.snd.snd.snd; - b61 = lb.snd.snd.snd.snd.snd.snd.fst; - b51 = lb.snd.snd.snd.snd.snd.fst; - b41 = lb.snd.snd.snd.snd.fst; - b31 = lb.snd.snd.snd.fst; - b21 = lb.snd.snd.fst; - b11 = lb.snd.fst; - b01 = lb.fst; - last00 = last; - last10 = last + (uint32_t)128U; - last2 = last + (uint32_t)256U; - last3 = last + (uint32_t)384U; - last4 = last + (uint32_t)512U; - last5 = last + (uint32_t)640U; - last6 = last + (uint32_t)768U; - last7 = last + (uint32_t)896U; - memcpy(last00, b01, rem * sizeof (uint8_t)); - last00[rem] = (uint8_t)0x80U; - memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last010 = last00; - last110 = last00 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit1; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l00; - uint8_t *l01; - uint8_t *last011; - uint8_t *last111; - lit1.fst = last010; - lit1.snd = last110; - scrut0 = lit1; - l00 = scrut0.fst; - l01 = scrut0.snd; - memcpy(last10, b11, rem * sizeof (uint8_t)); - last10[rem] = (uint8_t)0x80U; - memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last011 = last10; - last111 = last10 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit2; - Hacl_Impl_SHA2_Types_uint8_2p scrut1; - uint8_t *l10; - uint8_t *l11; - uint8_t *last012; - uint8_t *last112; - lit2.fst = last011; - lit2.snd = last111; - scrut1 = lit2; - l10 = scrut1.fst; - l11 = scrut1.snd; - memcpy(last2, b21, rem * sizeof (uint8_t)); - last2[rem] = (uint8_t)0x80U; - memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last012 = last2; - last112 = last2 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit3; - Hacl_Impl_SHA2_Types_uint8_2p scrut2; - uint8_t *l20; - uint8_t *l21; - uint8_t *last013; - uint8_t *last113; - lit3.fst = last012; - lit3.snd = last112; - scrut2 = lit3; - l20 = scrut2.fst; - l21 = scrut2.snd; - memcpy(last3, b31, rem * sizeof (uint8_t)); - last3[rem] = (uint8_t)0x80U; - memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last013 = last3; - last113 = last3 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit4; - Hacl_Impl_SHA2_Types_uint8_2p scrut3; - uint8_t *l30; - uint8_t *l31; - uint8_t *last014; - uint8_t *last114; - lit4.fst = last013; - lit4.snd = last113; - scrut3 = lit4; - l30 = scrut3.fst; - l31 = scrut3.snd; - memcpy(last4, b41, rem * sizeof (uint8_t)); - last4[rem] = (uint8_t)0x80U; - memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); - last014 = last4; - last114 = last4 + 
(uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit5; - Hacl_Impl_SHA2_Types_uint8_2p scrut4; - uint8_t *l40; - uint8_t *l41; - uint8_t *last015; - uint8_t *last115; - lit5.fst = last014; - lit5.snd = last114; - scrut4 = lit5; - l40 = scrut4.fst; - l41 = scrut4.snd; - memcpy(last5, b51, rem * sizeof (uint8_t)); - last5[rem] = (uint8_t)0x80U; - memcpy(last5 + fin - (uint32_t)8U, - totlen_buf, - (uint32_t)8U * sizeof (uint8_t)); - last015 = last5; - last115 = last5 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit6; - Hacl_Impl_SHA2_Types_uint8_2p scrut5; - uint8_t *l50; - uint8_t *l51; - uint8_t *last016; - uint8_t *last116; - lit6.fst = last015; - lit6.snd = last115; - scrut5 = lit6; - l50 = scrut5.fst; - l51 = scrut5.snd; - memcpy(last6, b61, rem * sizeof (uint8_t)); - last6[rem] = (uint8_t)0x80U; - memcpy(last6 + fin - (uint32_t)8U, - totlen_buf, - (uint32_t)8U * sizeof (uint8_t)); - last016 = last6; - last116 = last6 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit7; - Hacl_Impl_SHA2_Types_uint8_2p scrut6; - uint8_t *l60; - uint8_t *l61; - uint8_t *last01; - uint8_t *last11; - lit7.fst = last016; - lit7.snd = last116; - scrut6 = lit7; - l60 = scrut6.fst; - l61 = scrut6.snd; - memcpy(last7, b71, rem * sizeof (uint8_t)); - last7[rem] = (uint8_t)0x80U; - memcpy(last7 + fin - (uint32_t)8U, - totlen_buf, - (uint32_t)8U * sizeof (uint8_t)); - last01 = last7; - last11 = last7 + (uint32_t)64U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit8; - Hacl_Impl_SHA2_Types_uint8_2p scrut7; - uint8_t *l70; - uint8_t *l71; - lit8.fst = last01; - lit8.snd = last11; - scrut7 = lit8; - l70 = scrut7.fst; - l71 = scrut7.snd; - { - Hacl_Impl_SHA2_Types_uint8_8p mb0; - mb0.fst = l00; - mb0.snd.fst = l10; - mb0.snd.snd.fst = l20; - mb0.snd.snd.snd.fst = l30; - mb0.snd.snd.snd.snd.fst = l40; - mb0.snd.snd.snd.snd.snd.fst = l50; - mb0.snd.snd.snd.snd.snd.snd.fst = l60; - mb0.snd.snd.snd.snd.snd.snd.snd = l70; - { - Hacl_Impl_SHA2_Types_uint8_8p mb1; - mb1.fst = l01; - mb1.snd.fst = l11; - mb1.snd.snd.fst = l21; - mb1.snd.snd.snd.fst = l31; - mb1.snd.snd.snd.snd.fst = l41; - mb1.snd.snd.snd.snd.snd.fst = l51; - mb1.snd.snd.snd.snd.snd.snd.fst = l61; - mb1.snd.snd.snd.snd.snd.snd.snd = l71; - { - Hacl_Impl_SHA2_Types_uint8_2x8p lit; - Hacl_Impl_SHA2_Types_uint8_2x8p scrut; - Hacl_Impl_SHA2_Types_uint8_8p last0; - Hacl_Impl_SHA2_Types_uint8_8p last1; - lit.fst = mb0; - lit.snd = mb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha256_update8(last0, st); - if (blocks > (uint32_t)1U) - { - sha256_update8(last1, st); - } - { - uint8_t hbuf[256U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 v0 = st[0U]; - Lib_IntVector_Intrinsics_vec256 v1 = st[1U]; - Lib_IntVector_Intrinsics_vec256 v2 = st[2U]; - Lib_IntVector_Intrinsics_vec256 v3 = st[3U]; - Lib_IntVector_Intrinsics_vec256 v4 = st[4U]; - Lib_IntVector_Intrinsics_vec256 v5 = st[5U]; - Lib_IntVector_Intrinsics_vec256 v6 = st[6U]; - Lib_IntVector_Intrinsics_vec256 v7 = st[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v4_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v4, v5); - Lib_IntVector_Intrinsics_vec256 - v5_ = 
Lib_IntVector_Intrinsics_vec256_interleave_high32(v4, v5); - Lib_IntVector_Intrinsics_vec256 - v6_ = Lib_IntVector_Intrinsics_vec256_interleave_low32(v6, v7); - Lib_IntVector_Intrinsics_vec256 - v7_ = Lib_IntVector_Intrinsics_vec256_interleave_high32(v6, v7); - Lib_IntVector_Intrinsics_vec256 v0_0 = v0_; - Lib_IntVector_Intrinsics_vec256 v1_0 = v1_; - Lib_IntVector_Intrinsics_vec256 v2_0 = v2_; - Lib_IntVector_Intrinsics_vec256 v3_0 = v3_; - Lib_IntVector_Intrinsics_vec256 v4_0 = v4_; - Lib_IntVector_Intrinsics_vec256 v5_0 = v5_; - Lib_IntVector_Intrinsics_vec256 v6_0 = v6_; - Lib_IntVector_Intrinsics_vec256 v7_0 = v7_; - Lib_IntVector_Intrinsics_vec256 - v0_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v0_0, - v2_0); - Lib_IntVector_Intrinsics_vec256 - v2_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v0_0, - v2_0); - Lib_IntVector_Intrinsics_vec256 - v1_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v1_0, - v3_0); - Lib_IntVector_Intrinsics_vec256 - v3_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v1_0, - v3_0); - Lib_IntVector_Intrinsics_vec256 - v4_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v4_0, - v6_0); - Lib_IntVector_Intrinsics_vec256 - v6_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v4_0, - v6_0); - Lib_IntVector_Intrinsics_vec256 - v5_1 = - Lib_IntVector_Intrinsics_vec256_interleave_low64(v5_0, - v7_0); - Lib_IntVector_Intrinsics_vec256 - v7_1 = - Lib_IntVector_Intrinsics_vec256_interleave_high64(v5_0, - v7_0); - Lib_IntVector_Intrinsics_vec256 v0_10 = v0_1; - Lib_IntVector_Intrinsics_vec256 v1_10 = v1_1; - Lib_IntVector_Intrinsics_vec256 v2_10 = v2_1; - Lib_IntVector_Intrinsics_vec256 v3_10 = v3_1; - Lib_IntVector_Intrinsics_vec256 v4_10 = v4_1; - Lib_IntVector_Intrinsics_vec256 v5_10 = v5_1; - Lib_IntVector_Intrinsics_vec256 v6_10 = v6_1; - Lib_IntVector_Intrinsics_vec256 v7_10 = v7_1; - Lib_IntVector_Intrinsics_vec256 - v0_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_10, - v4_10); - Lib_IntVector_Intrinsics_vec256 - v4_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_10, - v4_10); - Lib_IntVector_Intrinsics_vec256 - v1_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_10, - v5_10); - Lib_IntVector_Intrinsics_vec256 - v5_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_10, - v5_10); - Lib_IntVector_Intrinsics_vec256 - v2_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v2_10, - v6_10); - Lib_IntVector_Intrinsics_vec256 - v6_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v2_10, - v6_10); - Lib_IntVector_Intrinsics_vec256 - v3_2 = - Lib_IntVector_Intrinsics_vec256_interleave_low128(v3_10, - v7_10); - Lib_IntVector_Intrinsics_vec256 - v7_2 = - Lib_IntVector_Intrinsics_vec256_interleave_high128(v3_10, - v7_10); - Lib_IntVector_Intrinsics_vec256 v0_20 = v0_2; - Lib_IntVector_Intrinsics_vec256 v1_20 = v1_2; - Lib_IntVector_Intrinsics_vec256 v2_20 = v2_2; - Lib_IntVector_Intrinsics_vec256 v3_20 = v3_2; - Lib_IntVector_Intrinsics_vec256 v4_20 = v4_2; - Lib_IntVector_Intrinsics_vec256 v5_20 = v5_2; - Lib_IntVector_Intrinsics_vec256 v6_20 = v6_2; - Lib_IntVector_Intrinsics_vec256 v7_20 = v7_2; - Lib_IntVector_Intrinsics_vec256 v0_3 = v0_20; - Lib_IntVector_Intrinsics_vec256 v1_3 = v1_20; - Lib_IntVector_Intrinsics_vec256 v2_3 = v2_20; - Lib_IntVector_Intrinsics_vec256 v3_3 = v3_20; - Lib_IntVector_Intrinsics_vec256 v4_3 = v4_20; - Lib_IntVector_Intrinsics_vec256 v5_3 = v5_20; - Lib_IntVector_Intrinsics_vec256 v6_3 = v6_20; - Lib_IntVector_Intrinsics_vec256 v7_3 = 
v7_20; - Lib_IntVector_Intrinsics_vec256 st0_ = v0_3; - Lib_IntVector_Intrinsics_vec256 st1_ = v2_3; - Lib_IntVector_Intrinsics_vec256 st2_ = v1_3; - Lib_IntVector_Intrinsics_vec256 st3_ = v3_3; - Lib_IntVector_Intrinsics_vec256 st4_ = v4_3; - Lib_IntVector_Intrinsics_vec256 st5_ = v6_3; - Lib_IntVector_Intrinsics_vec256 st6_ = v5_3; - Lib_IntVector_Intrinsics_vec256 st7_ = v7_3; - uint8_t *b7; - uint8_t *b6; - uint8_t *b5; - uint8_t *b4; - uint8_t *b3; - uint8_t *b2; - uint8_t *b1; - uint8_t *b0; - st[0U] = st0_; - st[1U] = st1_; - st[2U] = st2_; - st[3U] = st3_; - st[4U] = st4_; - st[5U] = st5_; - st[6U] = st6_; - st[7U] = st7_; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256_store32_be(hbuf - + i * (uint32_t)32U, - st[i]);); - b7 = rb.snd.snd.snd.snd.snd.snd.snd; - b6 = rb.snd.snd.snd.snd.snd.snd.fst; - b5 = rb.snd.snd.snd.snd.snd.fst; - b4 = rb.snd.snd.snd.snd.fst; - b3 = rb.snd.snd.snd.fst; - b2 = rb.snd.snd.fst; - b1 = rb.snd.fst; - b0 = rb.fst; - memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t)); - memcpy(b1, - hbuf + (uint32_t)32U, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(b2, - hbuf + (uint32_t)64U, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(b3, - hbuf + (uint32_t)96U, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(b4, - hbuf + (uint32_t)128U, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(b5, - hbuf + (uint32_t)160U, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(b6, - hbuf + (uint32_t)192U, - (uint32_t)32U * sizeof (uint8_t)); - memcpy(b7, - hbuf + (uint32_t)224U, - (uint32_t)32U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } -} - -static inline void -sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p block, Lib_IntVector_Intrinsics_vec256 *hash) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3; - uint8_t *b2; - uint8_t *b10; - uint8_t *b00; - Lib_IntVector_Intrinsics_vec256 v00; - Lib_IntVector_Intrinsics_vec256 v10; - Lib_IntVector_Intrinsics_vec256 v20; - Lib_IntVector_Intrinsics_vec256 v30; - Lib_IntVector_Intrinsics_vec256 v0_; - Lib_IntVector_Intrinsics_vec256 v1_; - Lib_IntVector_Intrinsics_vec256 v2_; - Lib_IntVector_Intrinsics_vec256 v3_; - Lib_IntVector_Intrinsics_vec256 v0__; - Lib_IntVector_Intrinsics_vec256 v1__; - Lib_IntVector_Intrinsics_vec256 v2__; - Lib_IntVector_Intrinsics_vec256 v3__; - Lib_IntVector_Intrinsics_vec256 ws0; - Lib_IntVector_Intrinsics_vec256 ws1; - Lib_IntVector_Intrinsics_vec256 ws2; - Lib_IntVector_Intrinsics_vec256 ws3; - Lib_IntVector_Intrinsics_vec256 v01; - Lib_IntVector_Intrinsics_vec256 v11; - Lib_IntVector_Intrinsics_vec256 v21; - Lib_IntVector_Intrinsics_vec256 v31; - Lib_IntVector_Intrinsics_vec256 v0_0; - Lib_IntVector_Intrinsics_vec256 v1_0; - Lib_IntVector_Intrinsics_vec256 v2_0; - Lib_IntVector_Intrinsics_vec256 v3_0; - Lib_IntVector_Intrinsics_vec256 v0__0; - Lib_IntVector_Intrinsics_vec256 v1__0; - Lib_IntVector_Intrinsics_vec256 v2__0; - Lib_IntVector_Intrinsics_vec256 v3__0; - Lib_IntVector_Intrinsics_vec256 ws4; - Lib_IntVector_Intrinsics_vec256 ws5; - Lib_IntVector_Intrinsics_vec256 ws6; - Lib_IntVector_Intrinsics_vec256 ws7; - Lib_IntVector_Intrinsics_vec256 v02; - Lib_IntVector_Intrinsics_vec256 v12; - Lib_IntVector_Intrinsics_vec256 v22; - Lib_IntVector_Intrinsics_vec256 v32; - Lib_IntVector_Intrinsics_vec256 v0_1; - Lib_IntVector_Intrinsics_vec256 v1_1; - 
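The interleave network that closes Hacl_SHA2_Vec256_sha256_8 above (low32/high32, then low64/high64, then low128/high128) is an 8x8 transpose of 32-bit words: the state goes from "one vector per state word, one lane per element" to "one contiguous digest per lane" so that each of the eight outputs can be served by a single memcpy from hbuf. A scalar model of the first stage, assuming the vec256 intrinsic follows AVX2 unpacklo semantics (interleaving within each 128-bit half rather than across the whole vector):

#include <stdint.h>

/* Scalar model of Lib_IntVector_Intrinsics_vec256_interleave_low32,
   assuming it matches AVX2 unpacklo_epi32: elements are interleaved
   within each 128-bit half, not across the full 256-bit vector. */
static void interleave_low32(const uint32_t a[8], const uint32_t b[8], uint32_t r[8])
{
  r[0] = a[0]; r[1] = b[0]; r[2] = a[1]; r[3] = b[1]; /* low half  */
  r[4] = a[4]; r[5] = b[4]; r[6] = a[5]; r[7] = b[5]; /* high half */
}

int main(void)
{
  uint32_t a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  uint32_t b[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
  uint32_t r[8];
  interleave_low32(a, b, r);
  /* expected: 0 10 1 11 | 4 14 5 15 */
  return (r[0] == 0U && r[1] == 10U && r[2] == 1U && r[3] == 11U && r[7] == 15U) ? 0 : 1;
}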
Lib_IntVector_Intrinsics_vec256 v2_1; - Lib_IntVector_Intrinsics_vec256 v3_1; - Lib_IntVector_Intrinsics_vec256 v0__1; - Lib_IntVector_Intrinsics_vec256 v1__1; - Lib_IntVector_Intrinsics_vec256 v2__1; - Lib_IntVector_Intrinsics_vec256 v3__1; - Lib_IntVector_Intrinsics_vec256 ws8; - Lib_IntVector_Intrinsics_vec256 ws9; - Lib_IntVector_Intrinsics_vec256 ws10; - Lib_IntVector_Intrinsics_vec256 ws11; - Lib_IntVector_Intrinsics_vec256 v0; - Lib_IntVector_Intrinsics_vec256 v1; - Lib_IntVector_Intrinsics_vec256 v2; - Lib_IntVector_Intrinsics_vec256 v3; - Lib_IntVector_Intrinsics_vec256 v0_2; - Lib_IntVector_Intrinsics_vec256 v1_2; - Lib_IntVector_Intrinsics_vec256 v2_2; - Lib_IntVector_Intrinsics_vec256 v3_2; - Lib_IntVector_Intrinsics_vec256 v0__2; - Lib_IntVector_Intrinsics_vec256 v1__2; - Lib_IntVector_Intrinsics_vec256 v2__2; - Lib_IntVector_Intrinsics_vec256 v3__2; - Lib_IntVector_Intrinsics_vec256 ws12; - Lib_IntVector_Intrinsics_vec256 ws13; - Lib_IntVector_Intrinsics_vec256 ws14; - Lib_IntVector_Intrinsics_vec256 ws15; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256)); - b3 = block.snd.snd.snd; - b2 = block.snd.snd.fst; - b10 = block.snd.fst; - b00 = block.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2); - ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U); - v00 = ws[0U]; - v10 = ws[1U]; - v20 = ws[2U]; - v30 = ws[3U]; - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - ws0 = v0__; - ws1 = v2__; - ws2 = v1__; - ws3 = v3__; - v01 = ws[4U]; - v11 = ws[5U]; - v21 = ws[6U]; - v31 = ws[7U]; - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - v2__0 = 
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - ws4 = v0__0; - ws5 = v2__0; - ws6 = v1__0; - ws7 = v3__0; - v02 = ws[8U]; - v12 = ws[9U]; - v22 = ws[10U]; - v32 = ws[11U]; - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - ws8 = v0__1; - ws9 = v2__1; - ws10 = v1__1; - ws11 = v3__1; - v0 = ws[12U]; - v1 = ws[13U]; - v2 = ws[14U]; - v3 = ws[15U]; - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - v2__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - ws12 = v0__2; - ws13 = v2__2; - ws14 = v1__2; - ws15 = v3__2; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - KRML_MAYBE_FOR5(i0, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i]; - Lib_IntVector_Intrinsics_vec256 ws_t = ws[i]; - Lib_IntVector_Intrinsics_vec256 a0 = hash[0U]; - Lib_IntVector_Intrinsics_vec256 b0 = hash[1U]; - Lib_IntVector_Intrinsics_vec256 c0 = hash[2U]; - Lib_IntVector_Intrinsics_vec256 d0 = hash[3U]; - Lib_IntVector_Intrinsics_vec256 e0 = hash[4U]; - Lib_IntVector_Intrinsics_vec256 f0 = hash[5U]; - Lib_IntVector_Intrinsics_vec256 g0 = hash[6U]; - Lib_IntVector_Intrinsics_vec256 h02 = hash[7U]; - Lib_IntVector_Intrinsics_vec256 k_e_t = Lib_IntVector_Intrinsics_vec256_load64(k_t); - Lib_IntVector_Intrinsics_vec256 - t1 = - Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02, - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, - (uint32_t)18U), - Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0), - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))), - k_e_t), - ws_t); - Lib_IntVector_Intrinsics_vec256 - t2 = - Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, - (uint32_t)28U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, - 
(uint32_t)34U), - Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0), - Lib_IntVector_Intrinsics_vec256_and(b0, c0)))); - Lib_IntVector_Intrinsics_vec256 a1 = Lib_IntVector_Intrinsics_vec256_add64(t1, t2); - Lib_IntVector_Intrinsics_vec256 b1 = a0; - Lib_IntVector_Intrinsics_vec256 c1 = b0; - Lib_IntVector_Intrinsics_vec256 d1 = c0; - Lib_IntVector_Intrinsics_vec256 e1 = Lib_IntVector_Intrinsics_vec256_add64(d0, t1); - Lib_IntVector_Intrinsics_vec256 f1 = e0; - Lib_IntVector_Intrinsics_vec256 g1 = f0; - Lib_IntVector_Intrinsics_vec256 h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)4U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 t16 = ws[i]; - Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 - s1 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2, - (uint32_t)19U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2, - (uint32_t)61U), - Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U))); - Lib_IntVector_Intrinsics_vec256 - s0 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15, - (uint32_t)1U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15, - (uint32_t)8U), - Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U))); - ws[i] = - Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1, - t7), - s0), - t16);); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]); - os[i] = x;); -} - -void -Hacl_SHA2_Vec256_sha384_4( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3 -) -{ - Hacl_Impl_SHA2_Types_uint8_4p ib; - ib.fst = input0; - ib.snd.fst = input1; - ib.snd.snd.fst = input2; - ib.snd.snd.snd = input3; - { - Hacl_Impl_SHA2_Types_uint8_4p rb; - rb.fst = dst0; - rb.snd.fst = dst1; - rb.snd.snd.fst = dst2; - rb.snd.snd.snd = dst3; - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rem; - FStar_UInt128_uint128 len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b30; - uint8_t *b20; - uint8_t *b10; - uint8_t *b00; - uint8_t *bl0; - uint8_t *bl10; - uint8_t *bl20; - uint8_t *bl30; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = st; - uint64_t hi = Hacl_Impl_SHA2_Generic_h384[i]; - Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi); - os[i] = x;); - rem = input_len % (uint32_t)128U; - len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len); - blocks0 = input_len / (uint32_t)128U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b3 = 
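Each KRML_MAYBE_FOR16 iteration in sha384_update4 above evaluates one SHA-384/512 round across four lanes at once; the rotation counts (14/18/41 for Sigma1, 28/34/39 for Sigma0) are the FIPS 180-4 values. The same round written for a single scalar lane, as a reference sketch only (the deleted code is the verified implementation):

#include <stdint.h>

static uint64_t rotr64(uint64_t x, uint32_t n) { return x >> n | x << (64U - n); }

/* One scalar SHA-512 round: st holds the working variables a..h, k_t is
   the round constant, ws_t the schedule word. Mirrors the vectorized
   t1/t2 computation above, one lane at a time. */
static void sha512_round(uint64_t st[8], uint64_t k_t, uint64_t ws_t)
{
  uint64_t a = st[0], b = st[1], c = st[2], d = st[3];
  uint64_t e = st[4], f = st[5], g = st[6], h = st[7];
  uint64_t t1 = h + (rotr64(e, 14U) ^ rotr64(e, 18U) ^ rotr64(e, 41U))  /* Sigma1 */
                  + ((e & f) ^ (~e & g))                                /* Ch     */
                  + k_t + ws_t;
  uint64_t t2 = (rotr64(a, 28U) ^ rotr64(a, 34U) ^ rotr64(a, 39U))      /* Sigma0 */
                  + ((a & b) ^ (a & c) ^ (b & c));                      /* Maj    */
  st[0] = t1 + t2; st[1] = a; st[2] = b; st[3] = c;
  st[4] = d + t1;  st[5] = e; st[6] = f; st[7] = g;
}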
ib.snd.snd.snd; - uint8_t *b2 = ib.snd.snd.fst; - uint8_t *b1 = ib.snd.fst; - uint8_t *b0 = ib.fst; - uint8_t *bl00 = b0 + i * (uint32_t)128U; - uint8_t *bl1 = b1 + i * (uint32_t)128U; - uint8_t *bl2 = b2 + i * (uint32_t)128U; - uint8_t *bl3 = b3 + i * (uint32_t)128U; - Hacl_Impl_SHA2_Types_uint8_4p lit; - lit.fst = bl00; - lit.snd.fst = bl1; - lit.snd.snd.fst = bl2; - lit.snd.snd.snd = bl3; - { - Hacl_Impl_SHA2_Types_uint8_4p mb = lit; - sha384_update4(mb, st); - } - } - } - rem1 = input_len % (uint32_t)128U; - b30 = ib.snd.snd.snd; - b20 = ib.snd.snd.fst; - b10 = ib.snd.fst; - b00 = ib.fst; - bl0 = b00 + input_len - rem1; - bl10 = b10 + input_len - rem1; - bl20 = b20 + input_len - rem1; - bl30 = b30 + input_len - rem1; - { - Hacl_Impl_SHA2_Types_uint8_4p lit0; - Hacl_Impl_SHA2_Types_uint8_4p lb; - lit0.fst = bl0; - lit0.snd.fst = bl10; - lit0.snd.snd.fst = bl20; - lit0.snd.snd.snd = bl30; - lb = lit0; - { - uint32_t blocks; - if (rem + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)128U; - uint8_t last[1024U] = { 0U }; - uint8_t totlen_buf[16U] = { 0U }; - FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(len_, (uint32_t)3U); - uint8_t *b31; - uint8_t *b21; - uint8_t *b11; - uint8_t *b01; - uint8_t *last00; - uint8_t *last10; - uint8_t *last2; - uint8_t *last3; - uint8_t *last010; - uint8_t *last110; - store128_be(totlen_buf, total_len_bits); - b31 = lb.snd.snd.snd; - b21 = lb.snd.snd.fst; - b11 = lb.snd.fst; - b01 = lb.fst; - last00 = last; - last10 = last + (uint32_t)256U; - last2 = last + (uint32_t)512U; - last3 = last + (uint32_t)768U; - memcpy(last00, b01, rem * sizeof (uint8_t)); - last00[rem] = (uint8_t)0x80U; - memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last010 = last00; - last110 = last00 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit1; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l00; - uint8_t *l01; - uint8_t *last011; - uint8_t *last111; - lit1.fst = last010; - lit1.snd = last110; - scrut0 = lit1; - l00 = scrut0.fst; - l01 = scrut0.snd; - memcpy(last10, b11, rem * sizeof (uint8_t)); - last10[rem] = (uint8_t)0x80U; - memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last011 = last10; - last111 = last10 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit2; - Hacl_Impl_SHA2_Types_uint8_2p scrut1; - uint8_t *l10; - uint8_t *l11; - uint8_t *last012; - uint8_t *last112; - lit2.fst = last011; - lit2.snd = last111; - scrut1 = lit2; - l10 = scrut1.fst; - l11 = scrut1.snd; - memcpy(last2, b21, rem * sizeof (uint8_t)); - last2[rem] = (uint8_t)0x80U; - memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last012 = last2; - last112 = last2 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit3; - Hacl_Impl_SHA2_Types_uint8_2p scrut2; - uint8_t *l20; - uint8_t *l21; - uint8_t *last01; - uint8_t *last11; - lit3.fst = last012; - lit3.snd = last112; - scrut2 = lit3; - l20 = scrut2.fst; - l21 = scrut2.snd; - memcpy(last3, b31, rem * sizeof (uint8_t)); - last3[rem] = (uint8_t)0x80U; - memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last01 = last3; - last11 = last3 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit4; - Hacl_Impl_SHA2_Types_uint8_2p scrut3; - uint8_t *l30; - uint8_t *l31; - lit4.fst = last01; - lit4.snd = last11; - scrut3 = lit4; - l30 = scrut3.fst; - l31 = scrut3.snd; - { - 
Hacl_Impl_SHA2_Types_uint8_4p mb0; - mb0.fst = l00; - mb0.snd.fst = l10; - mb0.snd.snd.fst = l20; - mb0.snd.snd.snd = l30; - { - Hacl_Impl_SHA2_Types_uint8_4p mb1; - mb1.fst = l01; - mb1.snd.fst = l11; - mb1.snd.snd.fst = l21; - mb1.snd.snd.snd = l31; - { - Hacl_Impl_SHA2_Types_uint8_2x4p lit; - Hacl_Impl_SHA2_Types_uint8_2x4p scrut; - Hacl_Impl_SHA2_Types_uint8_4p last0; - Hacl_Impl_SHA2_Types_uint8_4p last1; - lit.fst = mb0; - lit.snd = mb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha384_update4(last0, st); - if (blocks > (uint32_t)1U) - { - sha384_update4(last1, st); - } - { - uint8_t hbuf[256U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = st[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = st[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = st[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 st0_ = v0__; - Lib_IntVector_Intrinsics_vec256 st1_ = v2__; - Lib_IntVector_Intrinsics_vec256 st2_ = v1__; - Lib_IntVector_Intrinsics_vec256 st3_ = v3__; - Lib_IntVector_Intrinsics_vec256 v0 = st[4U]; - Lib_IntVector_Intrinsics_vec256 v1 = st[5U]; - Lib_IntVector_Intrinsics_vec256 v2 = st[6U]; - Lib_IntVector_Intrinsics_vec256 v3 = st[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 st4_ = v0__0; - Lib_IntVector_Intrinsics_vec256 st5_ = v2__0; - Lib_IntVector_Intrinsics_vec256 st6_ = v1__0; - Lib_IntVector_Intrinsics_vec256 st7_ = v3__0; - uint8_t *b3; - uint8_t *b2; - uint8_t *b1; - uint8_t *b0; - st[0U] = st0_; - st[1U] = st4_; - st[2U] = st1_; - st[3U] = st5_; - st[4U] = st2_; - st[5U] = st6_; - st[6U] = st3_; - st[7U] = st7_; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, - st[i]);); - b3 = rb.snd.snd.snd; - b2 = rb.snd.snd.fst; - b1 = rb.snd.fst; - b0 = rb.fst; - memcpy(b0, hbuf, (uint32_t)48U * sizeof (uint8_t)); - memcpy(b1, 
hbuf + (uint32_t)64U, (uint32_t)48U * sizeof (uint8_t)); - memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)48U * sizeof (uint8_t)); - memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)48U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - } - } - } - } - } -} - -static inline void -sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p block, Lib_IntVector_Intrinsics_vec256 *hash) -{ - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U }; - uint8_t *b3; - uint8_t *b2; - uint8_t *b10; - uint8_t *b00; - Lib_IntVector_Intrinsics_vec256 v00; - Lib_IntVector_Intrinsics_vec256 v10; - Lib_IntVector_Intrinsics_vec256 v20; - Lib_IntVector_Intrinsics_vec256 v30; - Lib_IntVector_Intrinsics_vec256 v0_; - Lib_IntVector_Intrinsics_vec256 v1_; - Lib_IntVector_Intrinsics_vec256 v2_; - Lib_IntVector_Intrinsics_vec256 v3_; - Lib_IntVector_Intrinsics_vec256 v0__; - Lib_IntVector_Intrinsics_vec256 v1__; - Lib_IntVector_Intrinsics_vec256 v2__; - Lib_IntVector_Intrinsics_vec256 v3__; - Lib_IntVector_Intrinsics_vec256 ws0; - Lib_IntVector_Intrinsics_vec256 ws1; - Lib_IntVector_Intrinsics_vec256 ws2; - Lib_IntVector_Intrinsics_vec256 ws3; - Lib_IntVector_Intrinsics_vec256 v01; - Lib_IntVector_Intrinsics_vec256 v11; - Lib_IntVector_Intrinsics_vec256 v21; - Lib_IntVector_Intrinsics_vec256 v31; - Lib_IntVector_Intrinsics_vec256 v0_0; - Lib_IntVector_Intrinsics_vec256 v1_0; - Lib_IntVector_Intrinsics_vec256 v2_0; - Lib_IntVector_Intrinsics_vec256 v3_0; - Lib_IntVector_Intrinsics_vec256 v0__0; - Lib_IntVector_Intrinsics_vec256 v1__0; - Lib_IntVector_Intrinsics_vec256 v2__0; - Lib_IntVector_Intrinsics_vec256 v3__0; - Lib_IntVector_Intrinsics_vec256 ws4; - Lib_IntVector_Intrinsics_vec256 ws5; - Lib_IntVector_Intrinsics_vec256 ws6; - Lib_IntVector_Intrinsics_vec256 ws7; - Lib_IntVector_Intrinsics_vec256 v02; - Lib_IntVector_Intrinsics_vec256 v12; - Lib_IntVector_Intrinsics_vec256 v22; - Lib_IntVector_Intrinsics_vec256 v32; - Lib_IntVector_Intrinsics_vec256 v0_1; - Lib_IntVector_Intrinsics_vec256 v1_1; - Lib_IntVector_Intrinsics_vec256 v2_1; - Lib_IntVector_Intrinsics_vec256 v3_1; - Lib_IntVector_Intrinsics_vec256 v0__1; - Lib_IntVector_Intrinsics_vec256 v1__1; - Lib_IntVector_Intrinsics_vec256 v2__1; - Lib_IntVector_Intrinsics_vec256 v3__1; - Lib_IntVector_Intrinsics_vec256 ws8; - Lib_IntVector_Intrinsics_vec256 ws9; - Lib_IntVector_Intrinsics_vec256 ws10; - Lib_IntVector_Intrinsics_vec256 ws11; - Lib_IntVector_Intrinsics_vec256 v0; - Lib_IntVector_Intrinsics_vec256 v1; - Lib_IntVector_Intrinsics_vec256 v2; - Lib_IntVector_Intrinsics_vec256 v3; - Lib_IntVector_Intrinsics_vec256 v0_2; - Lib_IntVector_Intrinsics_vec256 v1_2; - Lib_IntVector_Intrinsics_vec256 v2_2; - Lib_IntVector_Intrinsics_vec256 v3_2; - Lib_IntVector_Intrinsics_vec256 v0__2; - Lib_IntVector_Intrinsics_vec256 v1__2; - Lib_IntVector_Intrinsics_vec256 v2__2; - Lib_IntVector_Intrinsics_vec256 v3__2; - Lib_IntVector_Intrinsics_vec256 ws12; - Lib_IntVector_Intrinsics_vec256 ws13; - Lib_IntVector_Intrinsics_vec256 ws14; - Lib_IntVector_Intrinsics_vec256 ws15; - memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256)); - b3 = block.snd.snd.snd; - b2 = block.snd.snd.fst; - b10 = block.snd.fst; - b00 = block.fst; - ws[0U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00); - ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10); - ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2); - ws[3U] = 
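Hacl_SHA2_Vec256_sha384_4, completed just above, hashes four equal-length inputs in one pass; each lane's full 64-byte state image sits in hbuf at a 64-byte stride, and only the first 48 bytes per lane are copied out, which is exactly the SHA-384 truncation of the SHA-512 state. A minimal caller, with the wrapper name and buffer layout invented for the example:

#include <stdint.h>
#include "Hacl_SHA2_Vec256.h"

/* Hash four same-length messages in parallel; dst[n] receives the
   48-byte SHA-384 digest of the n-th input. */
void hash4_sha384(uint8_t dst[4][48], uint8_t *in0, uint8_t *in1,
                  uint8_t *in2, uint8_t *in3, uint32_t len)
{
  Hacl_SHA2_Vec256_sha384_4(dst[0], dst[1], dst[2], dst[3],
                            len, in0, in1, in2, in3);
}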
Lib_IntVector_Intrinsics_vec256_load64_be(b3); - ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U); - ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U); - ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U); - ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U); - ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U); - ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U); - ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U); - ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U); - ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U); - ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U); - ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U); - ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U); - v00 = ws[0U]; - v10 = ws[1U]; - v20 = ws[2U]; - v30 = ws[3U]; - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - v1__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - ws0 = v0__; - ws1 = v2__; - ws2 = v1__; - ws3 = v3__; - v01 = ws[4U]; - v11 = ws[5U]; - v21 = ws[6U]; - v31 = ws[7U]; - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v01, v11); - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v01, v11); - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v21, v31); - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v21, v31); - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - ws4 = v0__0; - ws5 = v2__0; - ws6 = v1__0; - ws7 = v3__0; - v02 = ws[8U]; - v12 = ws[9U]; - v22 = ws[10U]; - v32 = ws[11U]; - v0_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v02, v12); - v1_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v02, v12); - v2_1 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v22, v32); - v3_1 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v22, v32); - v0__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_1, v2_1); - v1__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_1, v2_1); - v2__1 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_1, v3_1); - v3__1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_1, v3_1); - ws8 = v0__1; - ws9 = v2__1; - ws10 = v1__1; - ws11 = v3__1; - v0 = ws[12U]; - v1 = ws[13U]; - v2 = ws[14U]; - v3 = ws[15U]; - v0_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - v1_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - v2_2 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - v3_2 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - v0__2 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_2, v2_2); - v1__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_2, v2_2); - v2__2 = 
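Each vec256_load64_be above pulls 32 bytes (four big-endian 64-bit words) from a single message, so right after loading, ws[0]..ws[3] hold four consecutive words of lanes 0..3 respectively; the interleave pairs that follow regroup each 4x4 block so every vector ends up holding word j of all four lanes. For 64-bit words only two stages (64- then 128-bit granularity) are needed, versus three for the 32-bit case earlier in this file. A scalar model of the 64-bit stage, again assuming AVX2 unpack semantics:

#include <stdint.h>

/* Scalar model of Lib_IntVector_Intrinsics_vec256_interleave_low64,
   assuming it matches AVX2 unpacklo_epi64 (per 128-bit half). */
static void interleave_low64(const uint64_t a[4], const uint64_t b[4], uint64_t r[4])
{
  r[0] = a[0]; r[1] = b[0]; /* low 128-bit half  */
  r[2] = a[2]; r[3] = b[2]; /* high 128-bit half */
}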
Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_2, v3_2); - v3__2 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_2, v3_2); - ws12 = v0__2; - ws13 = v2__2; - ws14 = v1__2; - ws15 = v3__2; - ws[0U] = ws0; - ws[1U] = ws1; - ws[2U] = ws2; - ws[3U] = ws3; - ws[4U] = ws4; - ws[5U] = ws5; - ws[6U] = ws6; - ws[7U] = ws7; - ws[8U] = ws8; - ws[9U] = ws9; - ws[10U] = ws10; - ws[11U] = ws11; - ws[12U] = ws12; - ws[13U] = ws13; - ws[14U] = ws14; - ws[15U] = ws15; - KRML_MAYBE_FOR5(i0, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i]; - Lib_IntVector_Intrinsics_vec256 ws_t = ws[i]; - Lib_IntVector_Intrinsics_vec256 a0 = hash[0U]; - Lib_IntVector_Intrinsics_vec256 b0 = hash[1U]; - Lib_IntVector_Intrinsics_vec256 c0 = hash[2U]; - Lib_IntVector_Intrinsics_vec256 d0 = hash[3U]; - Lib_IntVector_Intrinsics_vec256 e0 = hash[4U]; - Lib_IntVector_Intrinsics_vec256 f0 = hash[5U]; - Lib_IntVector_Intrinsics_vec256 g0 = hash[6U]; - Lib_IntVector_Intrinsics_vec256 h02 = hash[7U]; - Lib_IntVector_Intrinsics_vec256 k_e_t = Lib_IntVector_Intrinsics_vec256_load64(k_t); - Lib_IntVector_Intrinsics_vec256 - t1 = - Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02, - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, - (uint32_t)14U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, - (uint32_t)18U), - Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0), - Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))), - k_e_t), - ws_t); - Lib_IntVector_Intrinsics_vec256 - t2 = - Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, - (uint32_t)28U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, - (uint32_t)34U), - Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0), - Lib_IntVector_Intrinsics_vec256_and(b0, c0)))); - Lib_IntVector_Intrinsics_vec256 a1 = Lib_IntVector_Intrinsics_vec256_add64(t1, t2); - Lib_IntVector_Intrinsics_vec256 b1 = a0; - Lib_IntVector_Intrinsics_vec256 c1 = b0; - Lib_IntVector_Intrinsics_vec256 d1 = c0; - Lib_IntVector_Intrinsics_vec256 e1 = Lib_IntVector_Intrinsics_vec256_add64(d0, t1); - Lib_IntVector_Intrinsics_vec256 f1 = e0; - Lib_IntVector_Intrinsics_vec256 g1 = f0; - Lib_IntVector_Intrinsics_vec256 h12 = g0; - hash[0U] = a1; - hash[1U] = b1; - hash[2U] = c1; - hash[3U] = d1; - hash[4U] = e1; - hash[5U] = f1; - hash[6U] = g1; - hash[7U] = h12;); - if (i0 < (uint32_t)4U) - { - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 t16 = ws[i]; - Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; - Lib_IntVector_Intrinsics_vec256 - s1 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2, 
- (uint32_t)19U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2, - (uint32_t)61U), - Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U))); - Lib_IntVector_Intrinsics_vec256 - s0 = - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15, - (uint32_t)1U), - Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15, - (uint32_t)8U), - Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U))); - ws[i] = - Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1, - t7), - s0), - t16);); - }); - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = hash; - Lib_IntVector_Intrinsics_vec256 - x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]); - os[i] = x;); -} - -void -Hacl_SHA2_Vec256_sha512_4( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3 -) -{ - Hacl_Impl_SHA2_Types_uint8_4p ib; - ib.fst = input0; - ib.snd.fst = input1; - ib.snd.snd.fst = input2; - ib.snd.snd.snd = input3; - { - Hacl_Impl_SHA2_Types_uint8_4p rb; - rb.fst = dst0; - rb.snd.fst = dst1; - rb.snd.snd.fst = dst2; - rb.snd.snd.snd = dst3; - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U }; - uint32_t rem; - FStar_UInt128_uint128 len_; - uint32_t blocks0; - uint32_t rem1; - uint8_t *b30; - uint8_t *b20; - uint8_t *b10; - uint8_t *b00; - uint8_t *bl0; - uint8_t *bl10; - uint8_t *bl20; - uint8_t *bl30; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256 *os = st; - uint64_t hi = Hacl_Impl_SHA2_Generic_h512[i]; - Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi); - os[i] = x;); - rem = input_len % (uint32_t)128U; - len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len); - blocks0 = input_len / (uint32_t)128U; - { - uint32_t i; - for (i = (uint32_t)0U; i < blocks0; i++) - { - uint8_t *b3 = ib.snd.snd.snd; - uint8_t *b2 = ib.snd.snd.fst; - uint8_t *b1 = ib.snd.fst; - uint8_t *b0 = ib.fst; - uint8_t *bl00 = b0 + i * (uint32_t)128U; - uint8_t *bl1 = b1 + i * (uint32_t)128U; - uint8_t *bl2 = b2 + i * (uint32_t)128U; - uint8_t *bl3 = b3 + i * (uint32_t)128U; - Hacl_Impl_SHA2_Types_uint8_4p lit; - lit.fst = bl00; - lit.snd.fst = bl1; - lit.snd.snd.fst = bl2; - lit.snd.snd.snd = bl3; - { - Hacl_Impl_SHA2_Types_uint8_4p mb = lit; - sha512_update4(mb, st); - } - } - } - rem1 = input_len % (uint32_t)128U; - b30 = ib.snd.snd.snd; - b20 = ib.snd.snd.fst; - b10 = ib.snd.fst; - b00 = ib.fst; - bl0 = b00 + input_len - rem1; - bl10 = b10 + input_len - rem1; - bl20 = b20 + input_len - rem1; - bl30 = b30 + input_len - rem1; - { - Hacl_Impl_SHA2_Types_uint8_4p lit0; - Hacl_Impl_SHA2_Types_uint8_4p lb; - lit0.fst = bl0; - lit0.snd.fst = bl10; - lit0.snd.snd.fst = bl20; - lit0.snd.snd.snd = bl30; - lb = lit0; - { - uint32_t blocks; - if (rem + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U) - { - blocks = (uint32_t)1U; - } - else - { - blocks = (uint32_t)2U; - } - { - uint32_t fin = blocks * (uint32_t)128U; - uint8_t last[1024U] = { 0U }; - uint8_t totlen_buf[16U] = { 0U }; - FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(len_, (uint32_t)3U); - uint8_t *b31; - uint8_t *b21; - uint8_t *b11; - uint8_t *b01; - uint8_t *last00; - uint8_t *last10; - uint8_t 
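The message-schedule update above keeps only a 16-word rolling window: ws[(i+1)%16], ws[(i+9)%16] and ws[(i+14)%16] stand in for w[t-15], w[t-7] and w[t-2] of the specification, and the sigma rotations (19/61 with shift 6, 1/8 with shift 7) are the SHA-512 values. One update step rendered for a scalar lane, under the same modular indexing:

#include <stdint.h>

static uint64_t rotr64(uint64_t x, uint32_t n) { return x >> n | x << (64U - n); }

/* In-place SHA-512 schedule step over a 16-word window; i is the
   position within the window (0 <= i < 16). */
static void sha512_schedule_step(uint64_t ws[16], uint32_t i)
{
  uint64_t t16 = ws[i];
  uint64_t t15 = ws[(i + 1U) % 16U];
  uint64_t t7  = ws[(i + 9U) % 16U];
  uint64_t t2  = ws[(i + 14U) % 16U];
  uint64_t s1 = rotr64(t2, 19U) ^ rotr64(t2, 61U) ^ (t2 >> 6U);   /* sigma1 */
  uint64_t s0 = rotr64(t15, 1U) ^ rotr64(t15, 8U) ^ (t15 >> 7U);  /* sigma0 */
  ws[i] = s1 + t7 + s0 + t16;
}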
*last2; - uint8_t *last3; - uint8_t *last010; - uint8_t *last110; - store128_be(totlen_buf, total_len_bits); - b31 = lb.snd.snd.snd; - b21 = lb.snd.snd.fst; - b11 = lb.snd.fst; - b01 = lb.fst; - last00 = last; - last10 = last + (uint32_t)256U; - last2 = last + (uint32_t)512U; - last3 = last + (uint32_t)768U; - memcpy(last00, b01, rem * sizeof (uint8_t)); - last00[rem] = (uint8_t)0x80U; - memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last010 = last00; - last110 = last00 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit1; - Hacl_Impl_SHA2_Types_uint8_2p scrut0; - uint8_t *l00; - uint8_t *l01; - uint8_t *last011; - uint8_t *last111; - lit1.fst = last010; - lit1.snd = last110; - scrut0 = lit1; - l00 = scrut0.fst; - l01 = scrut0.snd; - memcpy(last10, b11, rem * sizeof (uint8_t)); - last10[rem] = (uint8_t)0x80U; - memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last011 = last10; - last111 = last10 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit2; - Hacl_Impl_SHA2_Types_uint8_2p scrut1; - uint8_t *l10; - uint8_t *l11; - uint8_t *last012; - uint8_t *last112; - lit2.fst = last011; - lit2.snd = last111; - scrut1 = lit2; - l10 = scrut1.fst; - l11 = scrut1.snd; - memcpy(last2, b21, rem * sizeof (uint8_t)); - last2[rem] = (uint8_t)0x80U; - memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last012 = last2; - last112 = last2 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit3; - Hacl_Impl_SHA2_Types_uint8_2p scrut2; - uint8_t *l20; - uint8_t *l21; - uint8_t *last01; - uint8_t *last11; - lit3.fst = last012; - lit3.snd = last112; - scrut2 = lit3; - l20 = scrut2.fst; - l21 = scrut2.snd; - memcpy(last3, b31, rem * sizeof (uint8_t)); - last3[rem] = (uint8_t)0x80U; - memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t)); - last01 = last3; - last11 = last3 + (uint32_t)128U; - { - Hacl_Impl_SHA2_Types_uint8_2p lit4; - Hacl_Impl_SHA2_Types_uint8_2p scrut3; - uint8_t *l30; - uint8_t *l31; - lit4.fst = last01; - lit4.snd = last11; - scrut3 = lit4; - l30 = scrut3.fst; - l31 = scrut3.snd; - { - Hacl_Impl_SHA2_Types_uint8_4p mb0; - mb0.fst = l00; - mb0.snd.fst = l10; - mb0.snd.snd.fst = l20; - mb0.snd.snd.snd = l30; - { - Hacl_Impl_SHA2_Types_uint8_4p mb1; - mb1.fst = l01; - mb1.snd.fst = l11; - mb1.snd.snd.fst = l21; - mb1.snd.snd.snd = l31; - { - Hacl_Impl_SHA2_Types_uint8_2x4p lit; - Hacl_Impl_SHA2_Types_uint8_2x4p scrut; - Hacl_Impl_SHA2_Types_uint8_4p last0; - Hacl_Impl_SHA2_Types_uint8_4p last1; - lit.fst = mb0; - lit.snd = mb1; - scrut = lit; - last0 = scrut.fst; - last1 = scrut.snd; - sha512_update4(last0, st); - if (blocks > (uint32_t)1U) - { - sha512_update4(last1, st); - } - { - uint8_t hbuf[256U] = { 0U }; - Lib_IntVector_Intrinsics_vec256 v00 = st[0U]; - Lib_IntVector_Intrinsics_vec256 v10 = st[1U]; - Lib_IntVector_Intrinsics_vec256 v20 = st[2U]; - Lib_IntVector_Intrinsics_vec256 v30 = st[3U]; - Lib_IntVector_Intrinsics_vec256 - v0_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v1_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v00, v10); - Lib_IntVector_Intrinsics_vec256 - v2_ = Lib_IntVector_Intrinsics_vec256_interleave_low64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v3_ = Lib_IntVector_Intrinsics_vec256_interleave_high64(v20, v30); - Lib_IntVector_Intrinsics_vec256 - v0__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v1__ = 
Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_, v2_); - Lib_IntVector_Intrinsics_vec256 - v2__ = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 - v3__ = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_, v3_); - Lib_IntVector_Intrinsics_vec256 st0_ = v0__; - Lib_IntVector_Intrinsics_vec256 st1_ = v2__; - Lib_IntVector_Intrinsics_vec256 st2_ = v1__; - Lib_IntVector_Intrinsics_vec256 st3_ = v3__; - Lib_IntVector_Intrinsics_vec256 v0 = st[4U]; - Lib_IntVector_Intrinsics_vec256 v1 = st[5U]; - Lib_IntVector_Intrinsics_vec256 v2 = st[6U]; - Lib_IntVector_Intrinsics_vec256 v3 = st[7U]; - Lib_IntVector_Intrinsics_vec256 - v0_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v1_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v0, v1); - Lib_IntVector_Intrinsics_vec256 - v2_0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v3_0 = Lib_IntVector_Intrinsics_vec256_interleave_high64(v2, v3); - Lib_IntVector_Intrinsics_vec256 - v0__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v1__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v0_0, v2_0); - Lib_IntVector_Intrinsics_vec256 - v2__0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 - v3__0 = Lib_IntVector_Intrinsics_vec256_interleave_high128(v1_0, v3_0); - Lib_IntVector_Intrinsics_vec256 st4_ = v0__0; - Lib_IntVector_Intrinsics_vec256 st5_ = v2__0; - Lib_IntVector_Intrinsics_vec256 st6_ = v1__0; - Lib_IntVector_Intrinsics_vec256 st7_ = v3__0; - uint8_t *b3; - uint8_t *b2; - uint8_t *b1; - uint8_t *b0; - st[0U] = st0_; - st[1U] = st4_; - st[2U] = st1_; - st[3U] = st5_; - st[4U] = st2_; - st[5U] = st6_; - st[6U] = st3_; - st[7U] = st7_; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, - st[i]);); - b3 = rb.snd.snd.snd; - b2 = rb.snd.snd.fst; - b1 = rb.snd.fst; - b0 = rb.fst; - memcpy(b0, hbuf, (uint32_t)64U * sizeof (uint8_t)); - memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)64U * sizeof (uint8_t)); - memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)64U * sizeof (uint8_t)); - memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)64U * sizeof (uint8_t)); - } - } - } - } - } - } - } - } - } - } - } - } - } -} - diff --git a/dist/c89-compatible/Hacl_SHA2_Vec256.h b/dist/c89-compatible/Hacl_SHA2_Vec256.h deleted file mode 100644 index 127486953a..0000000000 --- a/dist/c89-compatible/Hacl_SHA2_Vec256.h +++ /dev/null @@ -1,114 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_SHA2_Vec256_H -#define __Hacl_SHA2_Vec256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_SHA2_Generic.h" -#include "Hacl_Krmllib.h" -#include "evercrypt_targetconfig.h" -void -Hacl_SHA2_Vec256_sha224_8( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint8_t *dst4, - uint8_t *dst5, - uint8_t *dst6, - uint8_t *dst7, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint8_t *input4, - uint8_t *input5, - uint8_t *input6, - uint8_t *input7 -); - -void -Hacl_SHA2_Vec256_sha256_8( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint8_t *dst4, - uint8_t *dst5, - uint8_t *dst6, - uint8_t *dst7, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3, - uint8_t *input4, - uint8_t *input5, - uint8_t *input6, - uint8_t *input7 -); - -void -Hacl_SHA2_Vec256_sha384_4( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3 -); - -void -Hacl_SHA2_Vec256_sha512_4( - uint8_t *dst0, - uint8_t *dst1, - uint8_t *dst2, - uint8_t *dst3, - uint32_t input_len, - uint8_t *input0, - uint8_t *input1, - uint8_t *input2, - uint8_t *input3 -); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_SHA2_Vec256_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_SHA3.c b/dist/c89-compatible/Hacl_SHA3.c deleted file mode 100644 index f50d08d002..0000000000 --- a/dist/c89-compatible/Hacl_SHA3.c +++ /dev/null @@ -1,336 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
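The header deleted above declares the four multi-buffer entry points (sha224_8, sha256_8, sha384_4, sha512_4). All lanes must share a single input_len, since the block loop and the final padding are computed once for every lane. A sketch hashing eight equal-length messages with the 8-way SHA-256, in the C89 style of this distribution; the local buffer names are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include "Hacl_SHA2_Vec256.h"

int main(void)
{
  uint8_t msg[8][3];    /* eight 3-byte inputs           */
  uint8_t dgst[8][32];  /* eight 32-byte SHA-256 outputs */
  int i, j;
  for (i = 0; i < 8; i++)
  {
    msg[i][0] = 'a'; msg[i][1] = 'b'; msg[i][2] = (uint8_t)('0' + i);
  }
  Hacl_SHA2_Vec256_sha256_8(dgst[0], dgst[1], dgst[2], dgst[3],
                            dgst[4], dgst[5], dgst[6], dgst[7],
                            (uint32_t)3U,
                            msg[0], msg[1], msg[2], msg[3],
                            msg[4], msg[5], msg[6], msg[7]);
  for (i = 0; i < 8; i++)
  {
    for (j = 0; j < 32; j++) printf("%02x", dgst[i][j]);
    printf("\n");
  }
  return 0;
}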
- */ - - -#include "Hacl_SHA3.h" - - - -const -uint32_t -Hacl_Impl_SHA3_keccak_rotc[24U] = - { - (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, (uint32_t)15U, (uint32_t)21U, - (uint32_t)28U, (uint32_t)36U, (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U, - (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, (uint32_t)25U, (uint32_t)43U, - (uint32_t)62U, (uint32_t)18U, (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U - }; - -const -uint32_t -Hacl_Impl_SHA3_keccak_piln[24U] = - { - (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, (uint32_t)18U, (uint32_t)3U, - (uint32_t)5U, (uint32_t)16U, (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U, - (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, (uint32_t)12U, (uint32_t)2U, - (uint32_t)20U, (uint32_t)14U, (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U - }; - -const -uint64_t -Hacl_Impl_SHA3_keccak_rndc[24U] = - { - (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, (uint64_t)0x800000000000808aU, - (uint64_t)0x8000000080008000U, (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U, - (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, (uint64_t)0x000000000000008aU, - (uint64_t)0x0000000000000088U, (uint64_t)0x0000000080008009U, (uint64_t)0x000000008000000aU, - (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, (uint64_t)0x8000000000008089U, - (uint64_t)0x8000000000008003U, (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U, - (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, (uint64_t)0x8000000080008081U, - (uint64_t)0x8000000000008080U, (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U - }; - -inline uint64_t Hacl_Impl_SHA3_rotl(uint64_t a, uint32_t b) -{ - return a << b | a >> ((uint32_t)64U - b); -} - -void Hacl_Impl_SHA3_state_permute(uint64_t *s) -{ - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++) - { - uint64_t b[5U] = { 0U }; - uint64_t x; - KRML_MAYBE_FOR5(i, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - b[i] = - s[i - + (uint32_t)0U] - ^ - (s[i - + (uint32_t)5U] - ^ (s[i + (uint32_t)10U] ^ (s[i + (uint32_t)15U] ^ s[i + (uint32_t)20U])));); - KRML_MAYBE_FOR5(i1, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - uint64_t uu____0 = b[(i1 + (uint32_t)4U) % (uint32_t)5U]; - uint64_t - _D = uu____0 ^ Hacl_Impl_SHA3_rotl(b[(i1 + (uint32_t)1U) % (uint32_t)5U], (uint32_t)1U); - KRML_MAYBE_FOR5(i, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - s[i1 + (uint32_t)5U * i] = s[i1 + (uint32_t)5U * i] ^ _D;);); - Lib_Memzero0_memzero(b, (uint32_t)5U * sizeof (b[0U])); - x = s[1U]; - { - uint64_t b0 = x; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)24U; i++) - { - uint32_t _Y = Hacl_Impl_SHA3_keccak_piln[i]; - uint32_t r = Hacl_Impl_SHA3_keccak_rotc[i]; - uint64_t temp = s[_Y]; - s[_Y] = Hacl_Impl_SHA3_rotl(b0, r); - b0 = temp; - } - } - Lib_Memzero0_memzero(&b0, (uint32_t)1U * sizeof ((&b0)[0U])); - { - uint64_t b1[25U] = { 0U }; - uint64_t c; - memcpy(b1, s, (uint32_t)25U * sizeof (uint64_t)); - KRML_MAYBE_FOR5(i1, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - KRML_MAYBE_FOR5(i, - (uint32_t)0U, - (uint32_t)5U, - (uint32_t)1U, - s[i + (uint32_t)5U * i1] = - b1[i - + (uint32_t)5U * i1] - ^ - (~b1[(i + (uint32_t)1U) - % (uint32_t)5U - + (uint32_t)5U * i1] - & b1[(i + (uint32_t)2U) % (uint32_t)5U + (uint32_t)5U * i1]););); - Lib_Memzero0_memzero(b1, (uint32_t)25U * sizeof (b1[0U])); - c = Hacl_Impl_SHA3_keccak_rndc[i0]; - s[0U] = s[0U] ^ c; - } - } - } -} - -void 
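Hacl_Impl_SHA3_state_permute above runs the 24 Keccak-f[1600] rounds: theta (column parities accumulated in b), rho+pi (the keccak_rotc rotation counts applied in the keccak_piln lane order), chi, and iota (the keccak_rndc round constant XORed into s[0]). Note that every entry of keccak_rotc lies between 1 and 62, so Hacl_Impl_SHA3_rotl never evaluates the undefined 64-bit shift that b == 0 would trigger. A quick scalar check of the rotation identity, for illustration:

#include <assert.h>
#include <stdint.h>

/* Same definition as Hacl_Impl_SHA3_rotl above; valid for 0 < b < 64. */
static uint64_t rotl64(uint64_t a, uint32_t b)
{
  return a << b | a >> (64U - b);
}

int main(void)
{
  uint64_t x = 0x0123456789abcdefULL;
  uint32_t b;
  for (b = 1U; b < 64U; b++)
  {
    /* rotating left by b and then by 64 - b restores the input */
    assert(rotl64(rotl64(x, b), 64U - b) == x);
  }
  return 0;
}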
Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s) -{ - uint8_t b[200U] = { 0U }; - memcpy(b, input, rateInBytes * sizeof (uint8_t)); - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)25U; i++) - { - uint64_t u = load64_le(b + i * (uint32_t)8U); - uint64_t x = u; - s[i] = s[i] ^ x; - } - } - Lib_Memzero0_memzero(b, (uint32_t)200U * sizeof (b[0U])); -} - -void Hacl_Impl_SHA3_storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res) -{ - uint8_t b[200U] = { 0U }; - { - uint32_t i; - for (i = (uint32_t)0U; i < (uint32_t)25U; i++) - { - uint64_t sj = s[i]; - store64_le(b + i * (uint32_t)8U, sj); - } - } - memcpy(res, b, rateInBytes * sizeof (uint8_t)); - Lib_Memzero0_memzero(b, (uint32_t)200U * sizeof (b[0U])); -} - -void -Hacl_Impl_SHA3_absorb( - uint64_t *s, - uint32_t rateInBytes, - uint32_t inputByteLen, - uint8_t *input, - uint8_t delimitedSuffix -) -{ - uint32_t nb = inputByteLen / rateInBytes; - uint32_t rem = inputByteLen % rateInBytes; - uint8_t *last; - { - uint32_t i; - for (i = (uint32_t)0U; i < nb; i++) - { - uint8_t *block = input + i * rateInBytes; - Hacl_Impl_SHA3_loadState(rateInBytes, block, s); - Hacl_Impl_SHA3_state_permute(s); - } - } - last = input + nb * rateInBytes; - KRML_CHECK_SIZE(sizeof (uint8_t), rateInBytes); - { - uint8_t b[rateInBytes]; - memset(b, 0U, rateInBytes * sizeof (uint8_t)); - memcpy(b, last, rem * sizeof (uint8_t)); - b[rem] = delimitedSuffix; - Hacl_Impl_SHA3_loadState(rateInBytes, b, s); - if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && rem == rateInBytes - (uint32_t)1U) - { - Hacl_Impl_SHA3_state_permute(s); - } - KRML_CHECK_SIZE(sizeof (uint8_t), rateInBytes); - { - uint8_t b1[rateInBytes]; - memset(b1, 0U, rateInBytes * sizeof (uint8_t)); - b1[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U; - Hacl_Impl_SHA3_loadState(rateInBytes, b1, s); - Hacl_Impl_SHA3_state_permute(s); - Lib_Memzero0_memzero(b1, rateInBytes * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, rateInBytes * sizeof (b[0U])); - } - } -} - -void -Hacl_Impl_SHA3_squeeze( - uint64_t *s, - uint32_t rateInBytes, - uint32_t outputByteLen, - uint8_t *output -) -{ - uint32_t outBlocks = outputByteLen / rateInBytes; - uint32_t remOut = outputByteLen % rateInBytes; - uint8_t *last = output + outputByteLen - remOut; - uint8_t *blocks = output; - { - uint32_t i; - for (i = (uint32_t)0U; i < outBlocks; i++) - { - Hacl_Impl_SHA3_storeState(rateInBytes, s, blocks + i * rateInBytes); - Hacl_Impl_SHA3_state_permute(s); - } - } - Hacl_Impl_SHA3_storeState(remOut, s, last); -} - -void -Hacl_Impl_SHA3_keccak( - uint32_t rate, - uint32_t capacity, - uint32_t inputByteLen, - uint8_t *input, - uint8_t delimitedSuffix, - uint32_t outputByteLen, - uint8_t *output -) -{ - uint32_t rateInBytes = rate / (uint32_t)8U; - uint64_t s[25U] = { 0U }; - Hacl_Impl_SHA3_absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix); - Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output); -} - -void -Hacl_SHA3_shake128_hacl( - uint32_t inputByteLen, - uint8_t *input, - uint32_t outputByteLen, - uint8_t *output -) -{ - Hacl_Impl_SHA3_keccak((uint32_t)1344U, - (uint32_t)256U, - inputByteLen, - input, - (uint8_t)0x1FU, - outputByteLen, - output); -} - -void -Hacl_SHA3_shake256_hacl( - uint32_t inputByteLen, - uint8_t *input, - uint32_t outputByteLen, - uint8_t *output -) -{ - Hacl_Impl_SHA3_keccak((uint32_t)1088U, - (uint32_t)512U, - inputByteLen, - input, - (uint8_t)0x1FU, - outputByteLen, - output); -} - -void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t 
*output) -{ - Hacl_Impl_SHA3_keccak((uint32_t)1152U, - (uint32_t)448U, - inputByteLen, - input, - (uint8_t)0x06U, - (uint32_t)28U, - output); -} - -void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output) -{ - Hacl_Impl_SHA3_keccak((uint32_t)1088U, - (uint32_t)512U, - inputByteLen, - input, - (uint8_t)0x06U, - (uint32_t)32U, - output); -} - -void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output) -{ - Hacl_Impl_SHA3_keccak((uint32_t)832U, - (uint32_t)768U, - inputByteLen, - input, - (uint8_t)0x06U, - (uint32_t)48U, - output); -} - -void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output) -{ - Hacl_Impl_SHA3_keccak((uint32_t)576U, - (uint32_t)1024U, - inputByteLen, - input, - (uint8_t)0x06U, - (uint32_t)64U, - output); -} - diff --git a/dist/c89-compatible/Hacl_SHA3.h b/dist/c89-compatible/Hacl_SHA3.h deleted file mode 100644 index 1cfb5dfb84..0000000000 --- a/dist/c89-compatible/Hacl_SHA3.h +++ /dev/null @@ -1,112 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
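The Keccak core deleted above is a generic sponge: absorb XORs rate-sized blocks into the 25-word state and permutes, squeeze alternates extraction with permutation, and the one-shot wrappers fix the standard parameters (rate + capacity = 1600 bits; suffix 0x06 for the SHA-3 digests, 0x1F for the SHAKE XOFs). For reference, a minimal caller of the removed API looks like the sketch below, assuming a build that still ships Hacl_SHA3.h; the message and output lengths are illustrative.

```c
#include <stdio.h>
#include "Hacl_SHA3.h"

int main(void)
{
  uint8_t msg[3] = { 0x61U, 0x62U, 0x63U }; /* "abc" */
  uint8_t digest[32] = { 0U };
  uint8_t xof[64] = { 0U };
  uint32_t i;

  /* One-shot SHA3-256: rate 1088, capacity 512, suffix 0x06. */
  Hacl_SHA3_sha3_256((uint32_t)3U, msg, digest);

  /* SHAKE128 is an XOF, so the caller picks the output length. */
  Hacl_SHA3_shake128_hacl((uint32_t)3U, msg, (uint32_t)64U, xof);

  for (i = (uint32_t)0U; i < (uint32_t)32U; i++)
    printf("%02x", (unsigned int)digest[i]);
  printf("\n");
  return 0;
}
```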
- */


-#ifndef __Hacl_SHA3_H
-#define __Hacl_SHA3_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Lib_Memzero0.h"
-#include "Hacl_Krmllib.h"
-#include "evercrypt_targetconfig.h"
-extern const uint32_t Hacl_Impl_SHA3_keccak_rotc[24U];
-
-extern const uint32_t Hacl_Impl_SHA3_keccak_piln[24U];
-
-extern const uint64_t Hacl_Impl_SHA3_keccak_rndc[24U];
-
-uint64_t Hacl_Impl_SHA3_rotl(uint64_t a, uint32_t b);
-
-void Hacl_Impl_SHA3_state_permute(uint64_t *s);
-
-void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
-
-void Hacl_Impl_SHA3_storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res);
-
-void
-Hacl_Impl_SHA3_absorb(
-  uint64_t *s,
-  uint32_t rateInBytes,
-  uint32_t inputByteLen,
-  uint8_t *input,
-  uint8_t delimitedSuffix
-);
-
-void
-Hacl_Impl_SHA3_squeeze(
-  uint64_t *s,
-  uint32_t rateInBytes,
-  uint32_t outputByteLen,
-  uint8_t *output
-);
-
-void
-Hacl_Impl_SHA3_keccak(
-  uint32_t rate,
-  uint32_t capacity,
-  uint32_t inputByteLen,
-  uint8_t *input,
-  uint8_t delimitedSuffix,
-  uint32_t outputByteLen,
-  uint8_t *output
-);
-
-void
-Hacl_SHA3_shake128_hacl(
-  uint32_t inputByteLen,
-  uint8_t *input,
-  uint32_t outputByteLen,
-  uint8_t *output
-);
-
-void
-Hacl_SHA3_shake256_hacl(
-  uint32_t inputByteLen,
-  uint8_t *input,
-  uint32_t outputByteLen,
-  uint8_t *output
-);
-
-void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
-
-void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
-
-void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
-
-void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_SHA3_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Salsa20.c b/dist/c89-compatible/Hacl_Salsa20.c
deleted file mode 100644
index 19cd29df1a..0000000000
--- a/dist/c89-compatible/Hacl_Salsa20.c
+++ /dev/null
@@ -1,511 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "Hacl_Salsa20.h" - - - -static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d) -{ - uint32_t sta0 = st[b]; - uint32_t stb0 = st[a]; - uint32_t std0 = st[d]; - uint32_t sta10 = sta0 ^ ((stb0 + std0) << (uint32_t)7U | (stb0 + std0) >> (uint32_t)25U); - uint32_t sta2; - uint32_t stb1; - uint32_t std1; - uint32_t sta11; - uint32_t sta3; - uint32_t stb2; - uint32_t std2; - uint32_t sta12; - uint32_t sta; - uint32_t stb; - uint32_t std; - uint32_t sta1; - st[b] = sta10; - sta2 = st[c]; - stb1 = st[b]; - std1 = st[a]; - sta11 = sta2 ^ ((stb1 + std1) << (uint32_t)9U | (stb1 + std1) >> (uint32_t)23U); - st[c] = sta11; - sta3 = st[d]; - stb2 = st[c]; - std2 = st[b]; - sta12 = sta3 ^ ((stb2 + std2) << (uint32_t)13U | (stb2 + std2) >> (uint32_t)19U); - st[d] = sta12; - sta = st[a]; - stb = st[d]; - std = st[c]; - sta1 = sta ^ ((stb + std) << (uint32_t)18U | (stb + std) >> (uint32_t)14U); - st[a] = sta1; -} - -static inline void double_round(uint32_t *st) -{ - quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U); - quarter_round(st, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U, (uint32_t)1U); - quarter_round(st, (uint32_t)10U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U); - quarter_round(st, (uint32_t)15U, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U); - quarter_round(st, (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U); - quarter_round(st, (uint32_t)5U, (uint32_t)6U, (uint32_t)7U, (uint32_t)4U); - quarter_round(st, (uint32_t)10U, (uint32_t)11U, (uint32_t)8U, (uint32_t)9U); - quarter_round(st, (uint32_t)15U, (uint32_t)12U, (uint32_t)13U, (uint32_t)14U); -} - -static inline void rounds(uint32_t *st) -{ - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); - double_round(st); -} - -static inline void salsa20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr) -{ - uint32_t ctr_u32; - memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t)); - ctr_u32 = ctr; - k[8U] = k[8U] + ctr_u32; - rounds(k); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = k; - uint32_t x = k[i] + ctx[i]; - os[i] = x;); - k[8U] = k[8U] + ctr_u32; -} - -static inline void salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n) -{ - uint32_t ctx[16U] = { 0U }; - uint32_t k[16U] = { 0U }; - uint32_t k32[8U] = { 0U }; - uint32_t n32[2U] = { 0U }; - uint32_t *k0; - uint32_t *k1; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = k32; - uint8_t *bj = key + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t *os = n32; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - ctx[0U] = (uint32_t)0x61707865U; - k0 = k32; - k1 = k32 + (uint32_t)4U; - memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t)); - ctx[5U] = (uint32_t)0x3320646eU; - memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t)); - ctx[8U] = (uint32_t)0U; - ctx[9U] = (uint32_t)0U; - ctx[10U] = (uint32_t)0x79622d32U; - memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t)); - ctx[15U] = (uint32_t)0x6b206574U; - salsa20_core(k, ctx, (uint32_t)0U); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - store32_le(out + i * (uint32_t)4U, k[i]);); -} - -static inline void 
-salsa20_encrypt( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - uint32_t ctx[16U] = { 0U }; - uint32_t k32[8U] = { 0U }; - uint32_t n32[2U] = { 0U }; - uint32_t *k0; - uint32_t *k10; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = k32; - uint8_t *bj = key + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t *os = n32; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - ctx[0U] = (uint32_t)0x61707865U; - k0 = k32; - k10 = k32 + (uint32_t)4U; - memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t)); - ctx[5U] = (uint32_t)0x3320646eU; - memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t)); - ctx[8U] = ctr; - ctx[9U] = (uint32_t)0U; - ctx[10U] = (uint32_t)0x79622d32U; - memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t)); - ctx[15U] = (uint32_t)0x6b206574U; - { - uint32_t k[16U] = { 0U }; - uint32_t rem = len % (uint32_t)64U; - uint32_t nb = len / (uint32_t)64U; - uint32_t rem1 = len % (uint32_t)64U; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < nb; i0++) - { - uint8_t *uu____0 = out + i0 * (uint32_t)64U; - uint8_t *uu____1 = text + i0 * (uint32_t)64U; - uint32_t k1[16U] = { 0U }; - salsa20_core(k1, ctx, i0); - { - uint32_t bl[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint8_t *bj = uu____1 + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint32_t x = bl[i] ^ k1[i]; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - store32_le(uu____0 + i * (uint32_t)4U, bl[i]);); - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = text + nb * (uint32_t)64U; - uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - uint32_t k1[16U] = { 0U }; - salsa20_core(k1, ctx, nb); - { - uint32_t bl[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint8_t *bj = plain + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint32_t x = bl[i] ^ k1[i]; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - store32_le(plain + i * (uint32_t)4U, bl[i]);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } - } - } -} - -static inline void -salsa20_decrypt( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - uint32_t ctx[16U] = { 0U }; - uint32_t k32[8U] = { 0U }; - uint32_t n32[2U] = { 0U }; - uint32_t *k0; - uint32_t *k10; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = k32; - uint8_t *bj = key + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR2(i, - (uint32_t)0U, - (uint32_t)2U, - (uint32_t)1U, - uint32_t *os = n32; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - ctx[0U] = (uint32_t)0x61707865U; - k0 = k32; - 
k10 = k32 + (uint32_t)4U; - memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t)); - ctx[5U] = (uint32_t)0x3320646eU; - memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t)); - ctx[8U] = ctr; - ctx[9U] = (uint32_t)0U; - ctx[10U] = (uint32_t)0x79622d32U; - memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t)); - ctx[15U] = (uint32_t)0x6b206574U; - { - uint32_t k[16U] = { 0U }; - uint32_t rem = len % (uint32_t)64U; - uint32_t nb = len / (uint32_t)64U; - uint32_t rem1 = len % (uint32_t)64U; - { - uint32_t i0; - for (i0 = (uint32_t)0U; i0 < nb; i0++) - { - uint8_t *uu____0 = out + i0 * (uint32_t)64U; - uint8_t *uu____1 = cipher + i0 * (uint32_t)64U; - uint32_t k1[16U] = { 0U }; - salsa20_core(k1, ctx, i0); - { - uint32_t bl[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint8_t *bj = uu____1 + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint32_t x = bl[i] ^ k1[i]; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - store32_le(uu____0 + i * (uint32_t)4U, bl[i]);); - } - } - } - if (rem1 > (uint32_t)0U) - { - uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = cipher + nb * (uint32_t)64U; - uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); - { - uint32_t k1[16U] = { 0U }; - salsa20_core(k1, ctx, nb); - { - uint32_t bl[16U] = { 0U }; - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint8_t *bj = plain + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - uint32_t *os = bl; - uint32_t x = bl[i] ^ k1[i]; - os[i] = x;); - KRML_MAYBE_FOR16(i, - (uint32_t)0U, - (uint32_t)16U, - (uint32_t)1U, - store32_le(plain + i * (uint32_t)4U, bl[i]);); - memcpy(uu____2, plain, rem * sizeof (uint8_t)); - } - } - } - } -} - -static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n) -{ - uint32_t ctx[16U] = { 0U }; - uint32_t k32[8U] = { 0U }; - uint32_t n32[4U] = { 0U }; - uint32_t *k0; - uint32_t *k1; - uint32_t r0; - uint32_t r1; - uint32_t r2; - uint32_t r3; - uint32_t r4; - uint32_t r5; - uint32_t r6; - uint32_t r7; - uint32_t res[8]; - KRML_MAYBE_FOR8(i, - (uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - uint32_t *os = k32; - uint8_t *bj = key + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - KRML_MAYBE_FOR4(i, - (uint32_t)0U, - (uint32_t)4U, - (uint32_t)1U, - uint32_t *os = n32; - uint8_t *bj = n + i * (uint32_t)4U; - uint32_t u = load32_le(bj); - uint32_t r = u; - uint32_t x = r; - os[i] = x;); - k0 = k32; - k1 = k32 + (uint32_t)4U; - ctx[0U] = (uint32_t)0x61707865U; - memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t)); - ctx[5U] = (uint32_t)0x3320646eU; - memcpy(ctx + (uint32_t)6U, n32, (uint32_t)4U * sizeof (uint32_t)); - ctx[10U] = (uint32_t)0x79622d32U; - memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t)); - ctx[15U] = (uint32_t)0x6b206574U; - rounds(ctx); - r0 = ctx[0U]; - r1 = ctx[5U]; - r2 = ctx[10U]; - r3 = ctx[15U]; - r4 = ctx[6U]; - r5 = ctx[7U]; - r6 = ctx[8U]; - r7 = ctx[9U]; - res[0U] = r0; - res[1U] = r1; - res[2U] = r2; - res[3U] = r3; - res[4U] = r4; - res[5U] = r5; - res[6U] = r6; - res[7U] = r7; - KRML_MAYBE_FOR8(i, - 
(uint32_t)0U, - (uint32_t)8U, - (uint32_t)1U, - store32_le(out + i * (uint32_t)4U, res[i]);); -} - -void -Hacl_Salsa20_salsa20_encrypt( - uint32_t len, - uint8_t *out, - uint8_t *text, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - salsa20_encrypt(len, out, text, key, n, ctr); -} - -void -Hacl_Salsa20_salsa20_decrypt( - uint32_t len, - uint8_t *out, - uint8_t *cipher, - uint8_t *key, - uint8_t *n, - uint32_t ctr -) -{ - salsa20_decrypt(len, out, cipher, key, n, ctr); -} - -void Hacl_Salsa20_salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n) -{ - salsa20_key_block0(out, key, n); -} - -void Hacl_Salsa20_hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n) -{ - hsalsa20(out, key, n); -} - diff --git a/dist/c89-compatible/Hacl_Salsa20.h b/dist/c89-compatible/Hacl_Salsa20.h deleted file mode 100644 index 72f25fba7e..0000000000 --- a/dist/c89-compatible/Hacl_Salsa20.h +++ /dev/null @@ -1,69 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
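Note that salsa20_decrypt above is byte-for-byte the same keystream XOR as salsa20_encrypt (each quarter round is add-rotate-xor, with (x << n | x >> (32 - n)) as a 32-bit rotation), so the two entry points exist only for API symmetry. A round-trip sketch against the header below, assuming the deleted Hacl_Salsa20.h is still on an include path (32-byte key, 8-byte nonce, counter starting at 0):

```c
#include <assert.h>
#include <string.h>
#include "Hacl_Salsa20.h"

int main(void)
{
  uint8_t key[32] = { 0U };  /* use a uniformly random key in practice */
  uint8_t nonce[8] = { 0U }; /* must never repeat under the same key */
  uint8_t msg[12] = "hello salsa";
  uint8_t ct[12] = { 0U };
  uint8_t pt[12] = { 0U };

  Hacl_Salsa20_salsa20_encrypt((uint32_t)12U, ct, msg, key, nonce, (uint32_t)0U);
  Hacl_Salsa20_salsa20_decrypt((uint32_t)12U, pt, ct, key, nonce, (uint32_t)0U);
  assert(memcmp(pt, msg, 12U) == 0); /* decryption inverts encryption */
  return 0;
}
```

hsalsa20, also deleted above, is the related derivation step: it absorbs a 32-byte key and a 16-byte nonce and emits a fresh 32-byte subkey, the building block for XSalsa20-style extended nonces as used in NaCl.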
- */


-#ifndef __Hacl_Salsa20_H
-#define __Hacl_Salsa20_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Krmllib.h"
-#include "evercrypt_targetconfig.h"
-void
-Hacl_Salsa20_salsa20_encrypt(
-  uint32_t len,
-  uint8_t *out,
-  uint8_t *text,
-  uint8_t *key,
-  uint8_t *n,
-  uint32_t ctr
-);
-
-void
-Hacl_Salsa20_salsa20_decrypt(
-  uint32_t len,
-  uint8_t *out,
-  uint8_t *cipher,
-  uint8_t *key,
-  uint8_t *n,
-  uint32_t ctr
-);
-
-void Hacl_Salsa20_salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n);
-
-void Hacl_Salsa20_hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Salsa20_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Spec.h b/dist/c89-compatible/Hacl_Spec.h
deleted file mode 100644
index 2643817dd9..0000000000
--- a/dist/c89-compatible/Hacl_Spec.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */


-#ifndef __Hacl_Spec_H
-#define __Hacl_Spec_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-
-#include "evercrypt_targetconfig.h"
-#define Spec_Blake2_Blake2S 0
-#define Spec_Blake2_Blake2B 1
-
-typedef uint8_t Spec_Blake2_alg;
-
-#define Spec_Hash_Definitions_SHA2_224 0
-#define Spec_Hash_Definitions_SHA2_256 1
-#define Spec_Hash_Definitions_SHA2_384 2
-#define Spec_Hash_Definitions_SHA2_512 3
-#define Spec_Hash_Definitions_SHA1 4
-#define Spec_Hash_Definitions_MD5 5
-#define Spec_Hash_Definitions_Blake2S 6
-#define Spec_Hash_Definitions_Blake2B 7
-#define Spec_Hash_Definitions_SHA3_256 8
-
-typedef uint8_t Spec_Hash_Definitions_hash_alg;
-
-#define Spec_FFDHE_FFDHE2048 0
-#define Spec_FFDHE_FFDHE3072 1
-#define Spec_FFDHE_FFDHE4096 2
-#define Spec_FFDHE_FFDHE6144 3
-#define Spec_FFDHE_FFDHE8192 4
-
-typedef uint8_t Spec_FFDHE_ffdhe_alg;
-
-#define Spec_Agile_AEAD_AES128_GCM 0
-#define Spec_Agile_AEAD_AES256_GCM 1
-#define Spec_Agile_AEAD_CHACHA20_POLY1305 2
-#define Spec_Agile_AEAD_AES128_CCM 3
-#define Spec_Agile_AEAD_AES256_CCM 4
-#define Spec_Agile_AEAD_AES128_CCM8 5
-#define Spec_Agile_AEAD_AES256_CCM8 6
-
-typedef uint8_t Spec_Agile_AEAD_alg;
-
-#define Spec_Frodo_Params_SHAKE128 0
-#define Spec_Frodo_Params_AES128 1
-
-typedef uint8_t Spec_Frodo_Params_frodo_gen_a;
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Spec_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_Blake2.c b/dist/c89-compatible/Hacl_Streaming_Blake2.c
deleted file mode 100644
index 55b9669e3c..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_Blake2.c
+++ /dev/null
@@ -1,1265 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "Hacl_Streaming_Blake2.h" - - - -uint32_t -Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_alg a, Hacl_Impl_Blake2_Core_m_spec m) -{ - switch (m) - { - case Hacl_Impl_Blake2_Core_M32: - { - switch (a) - { - case Spec_Blake2_Blake2S: - { - return (uint32_t)64U; - } - case Spec_Blake2_Blake2B: - { - return (uint32_t)128U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - break; - } - case Hacl_Impl_Blake2_Core_M128: - { - switch (a) - { - case Spec_Blake2_Blake2S: - { - return (uint32_t)64U; - } - case Spec_Blake2_Blake2B: - { - return (uint32_t)128U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - break; - } - case Hacl_Impl_Blake2_Core_M256: - { - switch (a) - { - case Spec_Blake2_Blake2S: - { - return (uint32_t)64U; - } - case Spec_Blake2_Blake2B: - { - return (uint32_t)128U; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } - break; - } - default: - { - KRML_HOST_PRINTF("KaRaMeL incomplete match at %s:%d\n", __FILE__, __LINE__); - KRML_HOST_EXIT(253U); - } - } -} - -/** - State allocation function when there is no key -*/ -Hacl_Streaming_Blake2_blake2s_32_state *Hacl_Streaming_Blake2_blake2s_32_no_key_create_in() -{ - KRML_CHECK_SIZE(sizeof (uint8_t), - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32)); - { - uint8_t - *buf = - (uint8_t *)KRML_HOST_CALLOC(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32), - sizeof (uint8_t)); - uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t)); - uint32_t *b = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t)); - Hacl_Streaming_Blake2_blake2s_32_block_state lit; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state; - lit.fst = wv; - lit.snd = b; - block_state = lit; - { - Hacl_Streaming_Blake2_blake2s_32_state s1; - s1.block_state = block_state; - s1.buf = buf; - s1.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Blake2_blake2s_32_state), (uint32_t)1U); - { - Hacl_Streaming_Blake2_blake2s_32_state - *p = - (Hacl_Streaming_Blake2_blake2s_32_state *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Blake2_blake2s_32_state - )); - p[0U] = s1; - Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); - return p; - } - } - } -} - -/** - (Re-)initialization function when there is no key -*/ -void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_32_state *s1) -{ - Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state; - Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); - { - Hacl_Streaming_Blake2_blake2s_32_state lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s1[0U] = lit; - } -} - -/** - Update function when there is no key -*/ -void -Hacl_Streaming_Blake2_blake2s_32_no_key_update( - Hacl_Streaming_Blake2_blake2s_32_state *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Blake2_blake2s_32_state s1 = *p; - uint64_t total_len = s1.total_len; - uint32_t sz; - if - ( - total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - sz = 
Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - if - ( - len - <= Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32) - sz - ) - { - Hacl_Streaming_Blake2_blake2s_32_state s2 = *p; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state; - uint8_t *buf = s2.buf; - uint64_t total_len1 = s2.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_Blake2_blake2s_32_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_Blake2_blake2s_32_state s2 = *p; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state; - uint8_t *buf = s2.buf; - uint64_t total_len1 = s2.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint32_t nb0; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - / (uint32_t)64U; - Hacl_Blake2s_32_blake2s_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32), - block_state1.fst, - block_state1.snd, - prevlen, - buf, - nb); - } - if - ( - (uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && (uint64_t)len > (uint64_t)0U - ) - { - ite = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - } - else - { - ite = - (uint32_t)((uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - n_blocks = - (len - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - nb0 = data1_len / (uint32_t)64U; - Hacl_Blake2s_32_blake2s_update_multi(data1_len, - block_state1.fst, - block_state1.snd, - total_len1, - data1, - nb0); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - 
Hacl_Streaming_Blake2_blake2s_32_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t - diff = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Blake2_blake2s_32_state s20 = *p; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state10 = s20.block_state; - uint8_t *buf0 = s20.buf; - uint64_t total_len10 = s20.total_len; - uint32_t sz10; - if - ( - total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len10 > (uint64_t)0U - ) - { - sz10 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz10 = - (uint32_t)(total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_Blake2_blake2s_32_state lit; - Hacl_Streaming_Blake2_blake2s_32_state s2; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint32_t nb0; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s2 = *p; - block_state1 = s2.block_state; - buf = s2.buf; - total_len1 = s2.total_len; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - / (uint32_t)64U; - Hacl_Blake2s_32_blake2s_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32), - block_state1.fst, - block_state1.snd, - prevlen, - buf, - nb); - } - if - ( - (uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32); - } - else - { - ite = - (uint32_t)((uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - n_blocks = - (len - diff - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - nb0 = data1_len / (uint32_t)64U; - Hacl_Blake2s_32_blake2s_update_multi(data1_len, - block_state1.fst, - block_state1.snd, - total_len1, - data11, - nb0); - dst = buf; - memcpy(dst, 
data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Blake2_blake2s_32_state lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -/** - Finish function when there is no key -*/ -void -Hacl_Streaming_Blake2_blake2s_32_no_key_finish( - Hacl_Streaming_Blake2_blake2s_32_state *p, - uint8_t *dst -) -{ - Hacl_Streaming_Blake2_blake2s_32_state scrut = *p; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32); - } - else - { - r = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint8_t *buf_1 = buf_; - uint32_t wv[16U] = { 0U }; - uint32_t b[16U] = { 0U }; - Hacl_Streaming_Blake2_blake2s_32_block_state lit; - Hacl_Streaming_Blake2_blake2s_32_block_state tmp_block_state; - uint32_t *src_b; - uint32_t *dst_b; - uint64_t prev_len; - uint32_t ite0; - uint8_t *buf_last; - uint8_t *buf_multi; - uint32_t ite1; - uint32_t nb; - uint32_t ite2; - uint32_t ite3; - uint64_t prev_len_last; - uint32_t ite4; - uint32_t ite; - lit.fst = wv; - lit.snd = b; - tmp_block_state = lit; - src_b = block_state.snd; - dst_b = tmp_block_state.snd; - memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint32_t)); - prev_len = total_len - (uint64_t)r; - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite0 = (uint32_t)64U; - } - else - { - ite0 = r % (uint32_t)64U; - } - buf_last = buf_1 + r - ite0; - buf_multi = buf_1; - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32) - ) - { - ite1 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)64U; - } - else - { - ite5 = r % (uint32_t)64U; - } - ite1 = r - ite5; - } - nb = ite1 / (uint32_t)64U; - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32) - ) - { - ite2 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)64U; - } - else - { - ite5 = r % (uint32_t)64U; - } - ite2 = r - ite5; - } - Hacl_Blake2s_32_blake2s_update_multi(ite2, - tmp_block_state.fst, - tmp_block_state.snd, - prev_len, - buf_multi, - nb); - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32) - ) - { - ite3 = r; - } - else if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite3 = (uint32_t)64U; - } - else - { - ite3 = r % (uint32_t)64U; - } - prev_len_last = total_len - (uint64_t)ite3; - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32) - ) - { - ite4 = r; - } - else if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite4 = (uint32_t)64U; - } - else - { - ite4 = r % (uint32_t)64U; - } - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M32) - ) - { - ite = r; - } - else if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = 
(uint32_t)64U; - } - else - { - ite = r % (uint32_t)64U; - } - Hacl_Blake2s_32_blake2s_update_last(ite4, - tmp_block_state.fst, - tmp_block_state.snd, - prev_len_last, - ite, - buf_last); - Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd); - } -} - -/** - Free state function when there is no key -*/ -void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_32_state *s1) -{ - Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state; - uint32_t *wv = block_state.fst; - uint32_t *b = block_state.snd; - KRML_HOST_FREE(wv); - KRML_HOST_FREE(b); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s1); -} - -/** - State allocation function when there is no key -*/ -Hacl_Streaming_Blake2_blake2b_32_state *Hacl_Streaming_Blake2_blake2b_32_no_key_create_in() -{ - KRML_CHECK_SIZE(sizeof (uint8_t), - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32)); - { - uint8_t - *buf = - (uint8_t *)KRML_HOST_CALLOC(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32), - sizeof (uint8_t)); - uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t)); - uint64_t *b = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t)); - Hacl_Streaming_Blake2_blake2b_32_block_state lit; - Hacl_Streaming_Blake2_blake2b_32_block_state block_state; - lit.fst = wv; - lit.snd = b; - block_state = lit; - { - Hacl_Streaming_Blake2_blake2b_32_state s1; - s1.block_state = block_state; - s1.buf = buf; - s1.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Blake2_blake2b_32_state), (uint32_t)1U); - { - Hacl_Streaming_Blake2_blake2b_32_state - *p = - (Hacl_Streaming_Blake2_blake2b_32_state *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Blake2_blake2b_32_state - )); - p[0U] = s1; - Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); - return p; - } - } - } -} - -/** - (Re)-initialization function when there is no key -*/ -void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_32_state *s1) -{ - Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state; - Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); - { - Hacl_Streaming_Blake2_blake2b_32_state lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s1[0U] = lit; - } -} - -/** - Update function when there is no key -*/ -void -Hacl_Streaming_Blake2_blake2b_32_no_key_update( - Hacl_Streaming_Blake2_blake2b_32_state *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Blake2_blake2b_32_state s1 = *p; - uint64_t total_len = s1.total_len; - uint32_t sz; - if - ( - total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - sz = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - if - ( - len - <= Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32) - sz - ) - { - Hacl_Streaming_Blake2_blake2b_32_state s2 = *p; - Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state; - uint8_t *buf 
= s2.buf; - uint64_t total_len1 = s2.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_Blake2_blake2b_32_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_Blake2_blake2b_32_state s2 = *p; - Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state; - uint8_t *buf = s2.buf; - uint64_t total_len1 = s2.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint32_t nb0; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - / (uint32_t)128U; - Hacl_Blake2b_32_blake2b_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32), - block_state1.fst, - block_state1.snd, - FStar_UInt128_uint64_to_uint128(prevlen), - buf, - nb); - } - if - ( - (uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && (uint64_t)len > (uint64_t)0U - ) - { - ite = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - } - else - { - ite = - (uint32_t)((uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - n_blocks = - (len - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - nb0 = data1_len / (uint32_t)128U; - Hacl_Blake2b_32_blake2b_update_multi(data1_len, - block_state1.fst, - block_state1.snd, - FStar_UInt128_uint64_to_uint128(total_len1), - data1, - nb0); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Blake2_blake2b_32_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t - diff = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Blake2_blake2b_32_state s20 = *p; - Hacl_Streaming_Blake2_blake2b_32_block_state 
block_state10 = s20.block_state; - uint8_t *buf0 = s20.buf; - uint64_t total_len10 = s20.total_len; - uint32_t sz10; - if - ( - total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len10 > (uint64_t)0U - ) - { - sz10 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - } - else - { - sz10 = - (uint32_t)(total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_Blake2_blake2b_32_state lit; - Hacl_Streaming_Blake2_blake2b_32_state s2; - Hacl_Streaming_Blake2_blake2b_32_block_state block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint32_t nb0; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s2 = *p; - block_state1 = s2.block_state; - buf = s2.buf; - total_len1 = s2.total_len; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - / (uint32_t)128U; - Hacl_Blake2b_32_blake2b_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32), - block_state1.fst, - block_state1.snd, - FStar_UInt128_uint64_to_uint128(prevlen), - buf, - nb); - } - if - ( - (uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32); - } - else - { - ite = - (uint32_t)((uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - n_blocks = - (len - diff - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - nb0 = data1_len / (uint32_t)128U; - Hacl_Blake2b_32_blake2b_update_multi(data1_len, - block_state1.fst, - block_state1.snd, - FStar_UInt128_uint64_to_uint128(total_len1), - data11, - nb0); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Blake2_blake2b_32_state lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -/** - Finish function when there is no key -*/ -void -Hacl_Streaming_Blake2_blake2b_32_no_key_finish( - Hacl_Streaming_Blake2_blake2b_32_state *p, - 
uint8_t *dst -) -{ - Hacl_Streaming_Blake2_blake2b_32_state scrut = *p; - Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32); - } - else - { - r = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M32)); - } - { - uint8_t *buf_1 = buf_; - uint64_t wv[16U] = { 0U }; - uint64_t b[16U] = { 0U }; - Hacl_Streaming_Blake2_blake2b_32_block_state lit; - Hacl_Streaming_Blake2_blake2b_32_block_state tmp_block_state; - uint64_t *src_b; - uint64_t *dst_b; - uint64_t prev_len; - uint32_t ite0; - uint8_t *buf_last; - uint8_t *buf_multi; - uint32_t ite1; - uint32_t nb; - uint32_t ite2; - uint32_t ite3; - uint64_t prev_len_last; - uint32_t ite4; - uint32_t ite; - lit.fst = wv; - lit.snd = b; - tmp_block_state = lit; - src_b = block_state.snd; - dst_b = tmp_block_state.snd; - memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint64_t)); - prev_len = total_len - (uint64_t)r; - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite0 = (uint32_t)128U; - } - else - { - ite0 = r % (uint32_t)128U; - } - buf_last = buf_1 + r - ite0; - buf_multi = buf_1; - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32) - ) - { - ite1 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)128U; - } - else - { - ite5 = r % (uint32_t)128U; - } - ite1 = r - ite5; - } - nb = ite1 / (uint32_t)128U; - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32) - ) - { - ite2 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)128U; - } - else - { - ite5 = r % (uint32_t)128U; - } - ite2 = r - ite5; - } - Hacl_Blake2b_32_blake2b_update_multi(ite2, - tmp_block_state.fst, - tmp_block_state.snd, - FStar_UInt128_uint64_to_uint128(prev_len), - buf_multi, - nb); - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32) - ) - { - ite3 = r; - } - else if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite3 = (uint32_t)128U; - } - else - { - ite3 = r % (uint32_t)128U; - } - prev_len_last = total_len - (uint64_t)ite3; - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32) - ) - { - ite4 = r; - } - else if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite4 = (uint32_t)128U; - } - else - { - ite4 = r % (uint32_t)128U; - } - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M32) - ) - { - ite = r; - } - else if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)128U; - } - else - { - ite = r % (uint32_t)128U; - } - Hacl_Blake2b_32_blake2b_update_last(ite4, - tmp_block_state.fst, - tmp_block_state.snd, - FStar_UInt128_uint64_to_uint128(prev_len_last), - ite, - buf_last); - Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd); - } -} - -/** - Free state 
function when there is no key -*/ -void Hacl_Streaming_Blake2_blake2b_32_no_key_free(Hacl_Streaming_Blake2_blake2b_32_state *s1) -{ - Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state; - uint64_t *wv = block_state.fst; - uint64_t *b = block_state.snd; - KRML_HOST_FREE(wv); - KRML_HOST_FREE(b); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s1); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_Blake2.h b/dist/c89-compatible/Hacl_Streaming_Blake2.h deleted file mode 100644 index 1867bff5c8..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_Blake2.h +++ /dev/null @@ -1,148 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
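One naming convention worth spelling out before the header below: the fst/snd fields come from KaRaMeL's encoding of F* pairs, so each *_block_state is literally the pair (scratch working vector, chaining state) threaded through the update_multi calls, and total_len counts every byte ever passed to update, buffered or not. The structs are not abstract in this distribution, so a caller could observe progress directly; a purely illustrative accessor:

```c
#include "Hacl_Streaming_Blake2.h"

/* Illustrative only: block_state.fst is the scratch working vector,
 * block_state.snd the actual BLAKE2 chaining state. */
static uint64_t bytes_fed_so_far(Hacl_Streaming_Blake2_blake2s_32_state *st)
{
  return st->total_len;
}
```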
- */


-#ifndef __Hacl_Streaming_Blake2_H
-#define __Hacl_Streaming_Blake2_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Spec.h"
-#include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2.h"
-#include "evercrypt_targetconfig.h"
-uint32_t
-Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_alg a, Hacl_Impl_Blake2_Core_m_spec m);
-
-typedef struct Hacl_Streaming_Blake2_blake2s_32_block_state_s
-{
-  uint32_t *fst;
-  uint32_t *snd;
-}
-Hacl_Streaming_Blake2_blake2s_32_block_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2b_32_block_state_s
-{
-  uint64_t *fst;
-  uint64_t *snd;
-}
-Hacl_Streaming_Blake2_blake2b_32_block_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2s_32_state_s
-{
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2_blake2s_32_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2b_32_state_s
-{
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2_blake2b_32_state;
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2s_32_state *Hacl_Streaming_Blake2_blake2s_32_no_key_create_in();
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_32_state *s1);
-
-/**
-  Update function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2s_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_32_state *s1);
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2b_32_state *Hacl_Streaming_Blake2_blake2b_32_no_key_create_in();
-
-/**
-  (Re)-initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_32_state *s1);
-
-/**
-  Update function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2b_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_free(Hacl_Streaming_Blake2_blake2b_32_state *s1);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Blake2_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_Blake2b_256.c b/dist/c89-compatible/Hacl_Streaming_Blake2b_256.c
deleted file mode 100644
index 36c06e85c0..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_Blake2b_256.c
+++ /dev/null
@@ -1,631 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Streaming_Blake2b_256.h" - - - -/** - State allocation function when there is no key -*/ -Hacl_Streaming_Blake2b_256_blake2b_256_state -*Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in() -{ - KRML_CHECK_SIZE(sizeof (uint8_t), - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256)); - { - uint8_t - *buf = - (uint8_t *)KRML_HOST_CALLOC(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256), - sizeof (uint8_t)); - Lib_IntVector_Intrinsics_vec256 - *wv = - (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, - sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U); - memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - { - Lib_IntVector_Intrinsics_vec256 - *b = - (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, - sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U); - memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - { - Hacl_Streaming_Blake2b_256_blake2b_256_block_state lit; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state; - lit.fst = wv; - lit.snd = b; - block_state = lit; - { - Hacl_Streaming_Blake2b_256_blake2b_256_state s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Blake2b_256_blake2b_256_state), (uint32_t)1U); - { - Hacl_Streaming_Blake2b_256_blake2b_256_state - *p = - (Hacl_Streaming_Blake2b_256_blake2b_256_state *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Blake2b_256_blake2b_256_state - )); - p[0U] = s; - Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); - return p; - } - } - } - } - } -} - -/** - (Re-)initialization function when there is no key -*/ -void -Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init( - Hacl_Streaming_Blake2b_256_blake2b_256_state *s -) -{ - Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state; - Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); - { - Hacl_Streaming_Blake2b_256_blake2b_256_state lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -/** - Update function when there is no key -*/ -void -Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update( - Hacl_Streaming_Blake2b_256_blake2b_256_state *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Blake2b_256_blake2b_256_state s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if - ( - total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && total_len > 
(uint64_t)0U - ) - { - sz = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - } - else - { - sz = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - if - ( - len - <= Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256) - sz - ) - { - Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_Blake2b_256_blake2b_256_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint32_t nb0; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - / (uint32_t)128U; - Hacl_Blake2b_256_blake2b_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256), - block_state1.fst, - block_state1.snd, - FStar_UInt128_uint64_to_uint128(prevlen), - buf, - nb); - } - if - ( - (uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && (uint64_t)len > (uint64_t)0U - ) - { - ite = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256); - } - else - { - ite = - (uint32_t)((uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - n_blocks = - (len - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - nb0 = data1_len / (uint32_t)128U; - Hacl_Blake2b_256_blake2b_update_multi(data1_len, - block_state1.fst, - block_state1.snd, 
- FStar_UInt128_uint64_to_uint128(total_len1), - data1, - nb0); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Blake2b_256_blake2b_256_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t - diff = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Blake2b_256_blake2b_256_state s10 = *p; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if - ( - total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && total_len10 > (uint64_t)0U - ) - { - sz10 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - } - else - { - sz10 = - (uint32_t)(total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_Blake2b_256_blake2b_256_state lit; - Hacl_Streaming_Blake2b_256_blake2b_256_state s1; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint32_t nb0; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - / (uint32_t)128U; - Hacl_Blake2b_256_blake2b_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256), - block_state1.fst, - block_state1.snd, - FStar_UInt128_uint64_to_uint128(prevlen), - buf, - nb); - } - if - ( - (uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256); - } - else - { - ite = - (uint32_t)((uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - n_blocks = - (len - diff - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - data2_len = len - diff - data1_len; - data11 = 
data2; - data21 = data2 + data1_len; - nb0 = data1_len / (uint32_t)128U; - Hacl_Blake2b_256_blake2b_update_multi(data1_len, - block_state1.fst, - block_state1.snd, - FStar_UInt128_uint64_to_uint128(total_len1), - data11, - nb0); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Blake2b_256_blake2b_256_state lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -/** - Finish function when there is no key -*/ -void -Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish( - Hacl_Streaming_Blake2b_256_blake2b_256_state *p, - uint8_t *dst -) -{ - Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *p; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256); - } - else - { - r = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, - Hacl_Impl_Blake2_Core_M256)); - } - { - uint8_t *buf_1 = buf_; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U }; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U }; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state lit; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state tmp_block_state; - Lib_IntVector_Intrinsics_vec256 *src_b; - Lib_IntVector_Intrinsics_vec256 *dst_b; - uint64_t prev_len; - uint32_t ite0; - uint8_t *buf_last; - uint8_t *buf_multi; - uint32_t ite1; - uint32_t nb; - uint32_t ite2; - uint32_t ite3; - uint64_t prev_len_last; - uint32_t ite4; - uint32_t ite; - lit.fst = wv; - lit.snd = b; - tmp_block_state = lit; - src_b = block_state.snd; - dst_b = tmp_block_state.snd; - memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256)); - prev_len = total_len - (uint64_t)r; - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite0 = (uint32_t)128U; - } - else - { - ite0 = r % (uint32_t)128U; - } - buf_last = buf_1 + r - ite0; - buf_multi = buf_1; - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256) - ) - { - ite1 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)128U; - } - else - { - ite5 = r % (uint32_t)128U; - } - ite1 = r - ite5; - } - nb = ite1 / (uint32_t)128U; - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256) - ) - { - ite2 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)128U; - } - else - { - ite5 = r % (uint32_t)128U; - } - ite2 = r - ite5; - } - Hacl_Blake2b_256_blake2b_update_multi(ite2, - tmp_block_state.fst, - tmp_block_state.snd, - FStar_UInt128_uint64_to_uint128(prev_len), - buf_multi, - nb); - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256) - ) - { - ite3 = r; - } - else if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite3 = (uint32_t)128U; - } - else - { - ite3 = r % (uint32_t)128U; - } - 
prev_len_last = total_len - (uint64_t)ite3; - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256) - ) - { - ite4 = r; - } - else if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite4 = (uint32_t)128U; - } - else - { - ite4 = r % (uint32_t)128U; - } - if - ( - (uint32_t)128U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2B, Hacl_Impl_Blake2_Core_M256) - ) - { - ite = r; - } - else if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)128U; - } - else - { - ite = r % (uint32_t)128U; - } - Hacl_Blake2b_256_blake2b_update_last(ite4, - tmp_block_state.fst, - tmp_block_state.snd, - FStar_UInt128_uint64_to_uint128(prev_len_last), - ite, - buf_last); - Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd); - } -} - -/** - Free state function when there is no key -*/ -void -Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free( - Hacl_Streaming_Blake2b_256_blake2b_256_state *s -) -{ - Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec256 *wv = block_state.fst; - Lib_IntVector_Intrinsics_vec256 *b = block_state.snd; - KRML_ALIGNED_FREE(wv); - KRML_ALIGNED_FREE(b); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_Blake2b_256.h b/dist/c89-compatible/Hacl_Streaming_Blake2b_256.h deleted file mode 100644 index 5684430a12..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_Blake2b_256.h +++ /dev/null @@ -1,105 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
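
Aside for reviewers: the generated no_key_update bodies above (and the Blake2s/MD5/Poly1305 ones below) are all specializations of one buffered-absorb pattern that KaRaMeL has inlined and flattened. A compact, self-contained sketch of that pattern, with invented names (sketch_state, compress_blocks, B) standing in for the generated ones:

#include <stdint.h>
#include <string.h>

#define B 128u /* buffer size; blocks_state_len(Blake2B, M256) in the real code */

typedef struct { uint8_t buf[B]; uint64_t total_len; } sketch_state;

/* stand-in for blake2b_update_multi: consumes nbytes, a multiple of B */
static void compress_blocks(sketch_state *s, const uint8_t *p, uint32_t nbytes)
{ (void)s; (void)p; (void)nbytes; }

static void update_sketch(sketch_state *s, const uint8_t *data, uint32_t len)
{
  /* bytes already buffered; a full buffer is kept until more data arrives */
  uint32_t sz = (uint32_t)(s->total_len % B);
  uint32_t tail;
  if (sz == 0u && s->total_len > 0u) { sz = B; }
  s->total_len += len;
  if (len <= B - sz) {            /* case 1: everything fits in the buffer */
    memcpy(s->buf + sz, data, len);
    return;
  }
  if (sz != 0u) {                 /* case 2: top the buffer up, then flush it */
    memcpy(s->buf + sz, data, B - sz);
    compress_blocks(s, s->buf, B);
    data += B - sz;
    len -= B - sz;
  }
  tail = len % B;                 /* case 3: bulk-process, keep the tail buffered */
  if (tail == 0u && len > 0u) { tail = B; }
  compress_blocks(s, data, len - tail);
  memcpy(s->buf, data + len - tail, tail);
}

The finish functions then replay whatever is left in buf against a stack copy of the block state (update_multi on the full blocks, update_last on the remainder), which is why finish does not invalidate the streaming state.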
- */
-
-
-#ifndef __Hacl_Streaming_Blake2b_256_H
-#define __Hacl_Streaming_Blake2b_256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Streaming_Blake2.h"
-#include "Hacl_Spec.h"
-#include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2b_256.h"
-#include "Hacl_Hash_Blake2.h"
-#include "evercrypt_targetconfig.h"
-typedef struct Hacl_Streaming_Blake2b_256_blake2b_256_block_state_s
-{
-  Lib_IntVector_Intrinsics_vec256 *fst;
-  Lib_IntVector_Intrinsics_vec256 *snd;
-}
-Hacl_Streaming_Blake2b_256_blake2b_256_block_state;
-
-typedef struct Hacl_Streaming_Blake2b_256_blake2b_256_state_s
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2b_256_blake2b_256_state;
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2b_256_blake2b_256_state
-*Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in();
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-);
-
-/**
-  Update function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Blake2b_256_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_Blake2s_128.c b/dist/c89-compatible/Hacl_Streaming_Blake2s_128.c
deleted file mode 100644
index 20197fbd95..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_Blake2s_128.c
+++ /dev/null
@@ -1,631 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
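
For reference, the Blake2b-256 header removed above defined a five-call lifecycle around an abstract state. A hypothetical caller (msg, msg_len and digest are illustrative names, not part of the library) would have written:

#include "Hacl_Streaming_Blake2b_256.h" /* removed by this change */

static void digest_with_removed_api(uint8_t *msg, uint32_t msg_len, uint8_t digest[64])
{
  Hacl_Streaming_Blake2b_256_blake2b_256_state
  *st = Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in(); /* also initializes */
  Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(st, msg, msg_len); /* any number of times */
  Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(st, digest); /* 64-byte Blake2b digest */
  Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free(st);
}

The same lifecycle applies to the Blake2s_128, MD5 and Poly1305 files deleted below, modulo names and digest sizes.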
- */ - - -#include "Hacl_Streaming_Blake2s_128.h" - - - -/** - State allocation function when there is no key -*/ -Hacl_Streaming_Blake2s_128_blake2s_128_state -*Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in() -{ - KRML_CHECK_SIZE(sizeof (uint8_t), - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128)); - { - uint8_t - *buf = - (uint8_t *)KRML_HOST_CALLOC(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128), - sizeof (uint8_t)); - Lib_IntVector_Intrinsics_vec128 - *wv = - (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16, - sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U); - memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128)); - { - Lib_IntVector_Intrinsics_vec128 - *b = - (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16, - sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U); - memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128)); - { - Hacl_Streaming_Blake2s_128_blake2s_128_block_state lit; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state; - lit.fst = wv; - lit.snd = b; - block_state = lit; - { - Hacl_Streaming_Blake2s_128_blake2s_128_state s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Blake2s_128_blake2s_128_state), (uint32_t)1U); - { - Hacl_Streaming_Blake2s_128_blake2s_128_state - *p = - (Hacl_Streaming_Blake2s_128_blake2s_128_state *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Blake2s_128_blake2s_128_state - )); - p[0U] = s; - Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); - return p; - } - } - } - } - } -} - -/** - (Re-)initialization function when there is no key -*/ -void -Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init( - Hacl_Streaming_Blake2s_128_blake2s_128_state *s -) -{ - Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state; - Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); - { - Hacl_Streaming_Blake2s_128_blake2s_128_state lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -/** - Update function when there is no key -*/ -void -Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update( - Hacl_Streaming_Blake2s_128_blake2s_128_state *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Blake2s_128_blake2s_128_state s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if - ( - total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - sz = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - } - else - { - sz = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - if - ( - len - <= Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128) - sz - ) - { - Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == (uint64_t)0U - && total_len1 > (uint64_t)0U 
- ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_Blake2s_128_blake2s_128_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint32_t nb0; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - / (uint32_t)64U; - Hacl_Blake2s_128_blake2s_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128), - block_state1.fst, - block_state1.snd, - prevlen, - buf, - nb); - } - if - ( - (uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == (uint64_t)0U - && (uint64_t)len > (uint64_t)0U - ) - { - ite = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128); - } - else - { - ite = - (uint32_t)((uint64_t)len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - n_blocks = - (len - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - nb0 = data1_len / (uint32_t)64U; - Hacl_Blake2s_128_blake2s_update_multi(data1_len, - block_state1.fst, - block_state1.snd, - total_len1, - data1, - nb0); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Blake2s_128_blake2s_128_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t - diff = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Blake2s_128_blake2s_128_state s10 = *p; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if - ( - total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == 
(uint64_t)0U - && total_len10 > (uint64_t)0U - ) - { - sz10 = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - } - else - { - sz10 = - (uint32_t)(total_len10 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_Blake2s_128_blake2s_128_state lit; - Hacl_Streaming_Blake2s_128_blake2s_128_state s1; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint32_t nb0; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if - ( - total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == (uint64_t)0U - && total_len1 > (uint64_t)0U - ) - { - sz1 = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128); - } - else - { - sz1 = - (uint32_t)(total_len1 - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - if (!(sz1 == (uint32_t)0U)) - { - uint64_t prevlen = total_len1 - (uint64_t)sz1; - uint32_t - nb = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - / (uint32_t)64U; - Hacl_Blake2s_128_blake2s_update_multi(Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128), - block_state1.fst, - block_state1.snd, - prevlen, - buf, - nb); - } - if - ( - (uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = - Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128); - } - else - { - ite = - (uint32_t)((uint64_t)(len - diff) - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - n_blocks = - (len - diff - ite) - / Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - data1_len = - n_blocks - * Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - nb0 = data1_len / (uint32_t)64U; - Hacl_Blake2s_128_blake2s_update_multi(data1_len, - block_state1.fst, - block_state1.snd, - total_len1, - data11, - nb0); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Blake2s_128_blake2s_128_state lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -/** - Finish function when there is no key -*/ -void -Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish( - Hacl_Streaming_Blake2s_128_blake2s_128_state *p, - uint8_t *dst -) -{ - Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *p; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if - ( - total_len - % 
- (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128) - == (uint64_t)0U - && total_len > (uint64_t)0U - ) - { - r = Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128); - } - else - { - r = - (uint32_t)(total_len - % - (uint64_t)Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, - Hacl_Impl_Blake2_Core_M128)); - } - { - uint8_t *buf_1 = buf_; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U }; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U }; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state lit; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state tmp_block_state; - Lib_IntVector_Intrinsics_vec128 *src_b; - Lib_IntVector_Intrinsics_vec128 *dst_b; - uint64_t prev_len; - uint32_t ite0; - uint8_t *buf_last; - uint8_t *buf_multi; - uint32_t ite1; - uint32_t nb; - uint32_t ite2; - uint32_t ite3; - uint64_t prev_len_last; - uint32_t ite4; - uint32_t ite; - lit.fst = wv; - lit.snd = b; - tmp_block_state = lit; - src_b = block_state.snd; - dst_b = tmp_block_state.snd; - memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128)); - prev_len = total_len - (uint64_t)r; - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite0 = (uint32_t)64U; - } - else - { - ite0 = r % (uint32_t)64U; - } - buf_last = buf_1 + r - ite0; - buf_multi = buf_1; - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128) - ) - { - ite1 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)64U; - } - else - { - ite5 = r % (uint32_t)64U; - } - ite1 = r - ite5; - } - nb = ite1 / (uint32_t)64U; - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128) - ) - { - ite2 = (uint32_t)0U; - } - else - { - uint32_t ite5; - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite5 = (uint32_t)64U; - } - else - { - ite5 = r % (uint32_t)64U; - } - ite2 = r - ite5; - } - Hacl_Blake2s_128_blake2s_update_multi(ite2, - tmp_block_state.fst, - tmp_block_state.snd, - prev_len, - buf_multi, - nb); - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128) - ) - { - ite3 = r; - } - else if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite3 = (uint32_t)64U; - } - else - { - ite3 = r % (uint32_t)64U; - } - prev_len_last = total_len - (uint64_t)ite3; - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128) - ) - { - ite4 = r; - } - else if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite4 = (uint32_t)64U; - } - else - { - ite4 = r % (uint32_t)64U; - } - if - ( - (uint32_t)64U - == Hacl_Streaming_Blake2_blocks_state_len(Spec_Blake2_Blake2S, Hacl_Impl_Blake2_Core_M128) - ) - { - ite = r; - } - else if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = r % (uint32_t)64U; - } - Hacl_Blake2s_128_blake2s_update_last(ite4, - tmp_block_state.fst, - tmp_block_state.snd, - prev_len_last, - ite, - buf_last); - Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd); - } -} - -/** - Free state function when there is no key -*/ -void -Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free( - Hacl_Streaming_Blake2s_128_blake2s_128_state *s 
-) -{ - Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s; - uint8_t *buf = scrut.buf; - Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state; - Lib_IntVector_Intrinsics_vec128 *wv = block_state.fst; - Lib_IntVector_Intrinsics_vec128 *b = block_state.snd; - KRML_ALIGNED_FREE(wv); - KRML_ALIGNED_FREE(b); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_Blake2s_128.h b/dist/c89-compatible/Hacl_Streaming_Blake2s_128.h deleted file mode 100644 index 1af09b78c3..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_Blake2s_128.h +++ /dev/null @@ -1,104 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
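
One detail of the free function above worth keeping in mind when porting away from this dist: the two vector arrays are released with KRML_ALIGNED_FREE while buf and the state struct go through KRML_HOST_FREE. The pairing matters because the vec128/vec256 block states come from KRML_ALIGNED_MALLOC (16- and 32-byte alignment respectively), which, to my understanding of krmllib's target.h, maps to _aligned_malloc on MSVC, and such blocks must not be passed to plain free(). A minimal sketch of the invariant (header names as in this dist):

#include "krml/internal/types.h"
#include "krml/internal/target.h"
#include "libintvector.h"

static void alignment_sketch(void)
{
  Lib_IntVector_Intrinsics_vec128
  *wv = (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
    sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
  KRML_ALIGNED_FREE(wv); /* _aligned_free on MSVC; plain free() would be undefined there */
  KRML_HOST_FREE(buf);   /* ordinary free() */
}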
- */
-
-
-#ifndef __Hacl_Streaming_Blake2s_128_H
-#define __Hacl_Streaming_Blake2s_128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Streaming_Blake2.h"
-#include "Hacl_Spec.h"
-#include "Hacl_Hash_Blake2s_128.h"
-#include "Hacl_Hash_Blake2.h"
-#include "evercrypt_targetconfig.h"
-typedef struct Hacl_Streaming_Blake2s_128_blake2s_128_block_state_s
-{
-  Lib_IntVector_Intrinsics_vec128 *fst;
-  Lib_IntVector_Intrinsics_vec128 *snd;
-}
-Hacl_Streaming_Blake2s_128_blake2s_128_block_state;
-
-typedef struct Hacl_Streaming_Blake2s_128_blake2s_128_state_s
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2s_128_blake2s_128_state;
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2s_128_blake2s_128_state
-*Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in();
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-);
-
-/**
-  Update function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Blake2s_128_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_MD5.c b/dist/c89-compatible/Hacl_Streaming_MD5.c
deleted file mode 100644
index cd08eac13d..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_MD5.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - - -#include "Hacl_Streaming_MD5.h" - -#include "internal/Hacl_Hash_MD5.h" - -Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_MD5_legacy_create_in_md5() -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t)); - Hacl_Streaming_SHA2_state_sha2_224 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_SHA2_state_sha2_224), (uint32_t)1U); - { - Hacl_Streaming_SHA2_state_sha2_224 - *p = - (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_SHA2_state_sha2_224 - )); - p[0U] = s; - Hacl_Hash_Core_MD5_legacy_init(block_state); - return p; - } -} - -void Hacl_Streaming_MD5_legacy_init_md5(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; - Hacl_Hash_Core_MD5_legacy_init(block_state); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -void -Hacl_Streaming_MD5_legacy_update_md5( - Hacl_Streaming_SHA2_state_sha2_224 *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_SHA2_state_sha2_224 s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)64U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - if (len <= (uint32_t)64U - sz) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U); - } - if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + 
(uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)64U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_SHA2_state_sha2_224 s10 = *p; - uint32_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)64U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - Hacl_Streaming_SHA2_state_sha2_224 s1; - uint32_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)64U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - diff - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -void Hacl_Streaming_MD5_legacy_finish_md5(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *p; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)64U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf_1 = buf_; - uint32_t tmp_block_state[4U] = { 0U }; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - memcpy(tmp_block_state, block_state, (uint32_t)4U * sizeof (uint32_t)); - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = r % (uint32_t)64U; - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - Hacl_Hash_MD5_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r); - Hacl_Hash_Core_MD5_legacy_finish(tmp_block_state, dst); - } -} - -void Hacl_Streaming_MD5_legacy_free_md5(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - uint32_t *block_state = 
scrut.block_state;
-  KRML_HOST_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
diff --git a/dist/c89-compatible/Hacl_Streaming_MD5.h b/dist/c89-compatible/Hacl_Streaming_MD5.h
deleted file mode 100644
index e034e2c154..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_MD5.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Streaming_MD5_H
-#define __Hacl_Streaming_MD5_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Streaming_SHA2.h"
-#include "Hacl_Hash_MD5.h"
-#include "evercrypt_targetconfig.h"
-typedef Hacl_Streaming_SHA2_state_sha2_224 Hacl_Streaming_MD5_state_md5;
-
-Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_MD5_legacy_create_in_md5();
-
-void Hacl_Streaming_MD5_legacy_init_md5(Hacl_Streaming_SHA2_state_sha2_224 *s);
-
-void
-Hacl_Streaming_MD5_legacy_update_md5(
-  Hacl_Streaming_SHA2_state_sha2_224 *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-void Hacl_Streaming_MD5_legacy_finish_md5(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst);
-
-void Hacl_Streaming_MD5_legacy_free_md5(Hacl_Streaming_SHA2_state_sha2_224 *s);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_MD5_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_Poly1305_128.c b/dist/c89-compatible/Hacl_Streaming_Poly1305_128.c
deleted file mode 100644
index 2129d7f2ed..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_Poly1305_128.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
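
Also noteworthy in the MD5 header above: Hacl_Streaming_MD5_state_md5 is just a typedef of Hacl_Streaming_SHA2_state_sha2_224. The reuse is sound because the struct only carries a uint32_t *block_state pointer plus buf and total_len; the create function earlier allocates four words for MD5's A/B/C/D state rather than SHA-224's eight. A hypothetical use of the removed API (msg, msg_len and digest are illustrative names):

#include "Hacl_Streaming_MD5.h" /* removed by this change */

static void md5_with_removed_api(uint8_t *msg, uint32_t msg_len, uint8_t digest[16])
{
  Hacl_Streaming_MD5_state_md5 *st = Hacl_Streaming_MD5_legacy_create_in_md5();
  Hacl_Streaming_MD5_legacy_update_md5(st, msg, msg_len);
  Hacl_Streaming_MD5_legacy_finish_md5(st, digest); /* 16-byte MD5 digest */
  Hacl_Streaming_MD5_legacy_free_md5(st);
}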
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Streaming_Poly1305_128.h" - - - -Hacl_Streaming_Poly1305_128_poly1305_128_state -*Hacl_Streaming_Poly1305_128_create_in(uint8_t *k) -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - Lib_IntVector_Intrinsics_vec128 - *r1 = - (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16, - sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)25U); - memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128)); - { - Lib_IntVector_Intrinsics_vec128 *block_state = r1; - uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - uint8_t *k_0; - memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); - k_0 = k_; - { - Hacl_Streaming_Poly1305_128_poly1305_128_state s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - s.p_key = k_0; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Poly1305_128_poly1305_128_state), (uint32_t)1U); - { - Hacl_Streaming_Poly1305_128_poly1305_128_state - *p = - (Hacl_Streaming_Poly1305_128_poly1305_128_state *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Poly1305_128_poly1305_128_state - )); - p[0U] = s; - Hacl_Poly1305_128_poly1305_init(block_state, k); - return p; - } - } - } -} - -void -Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly1305_128_state *s) -{ - Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *s; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state; - uint8_t *k_1; - Hacl_Poly1305_128_poly1305_init(block_state, k); - memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); - k_1 = k_; - { - Hacl_Streaming_Poly1305_128_poly1305_128_state lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - lit.p_key = k_1; - s[0U] = lit; - } -} - -void -Hacl_Streaming_Poly1305_128_update( - Hacl_Streaming_Poly1305_128_poly1305_128_state *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Poly1305_128_poly1305_128_state s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)32U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)32U); - } - if (len <= (uint32_t)32U - sz) - { - Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p; - Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)32U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_Poly1305_128_poly1305_128_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - lit.p_key = k_1; - *p = lit; - return; - } - } - } - if (sz == 
(uint32_t)0U) - { - Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p; - Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)32U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf); - } - if ((uint64_t)len % (uint64_t)(uint32_t)32U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)32U; - } - else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)32U); - } - n_blocks = (len - ite) / (uint32_t)32U; - data1_len = n_blocks * (uint32_t)32U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Poly1305_128_poly1305_update(block_state1, data1_len, data1); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Poly1305_128_poly1305_128_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - lit.p_key = k_1; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)32U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Poly1305_128_poly1305_128_state s10 = *p; - Lib_IntVector_Intrinsics_vec128 *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint8_t *k_1 = s10.p_key; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)32U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)32U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_Poly1305_128_poly1305_128_state lit; - Hacl_Streaming_Poly1305_128_poly1305_128_state s1; - Lib_IntVector_Intrinsics_vec128 *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint8_t *k_10; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - lit.p_key = k_1; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - k_10 = s1.p_key; - if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)32U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)32U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)32U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)32U); - } - n_blocks = (len - diff - ite) / (uint32_t)32U; - data1_len = n_blocks * (uint32_t)32U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Poly1305_128_poly1305_update(block_state1, data1_len, data11); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - 
Hacl_Streaming_Poly1305_128_poly1305_128_state lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - lit0.p_key = k_10; - *p = lit0; - } - } - } - } -} - -void -Hacl_Streaming_Poly1305_128_finish( - Hacl_Streaming_Poly1305_128_poly1305_128_state *p, - uint8_t *dst -) -{ - Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *p; - Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint8_t *k_ = scrut.p_key; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)32U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)32U); - } - { - uint8_t *buf_1 = buf_; - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U }; - Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1; - uint32_t ite0; - uint8_t *buf_last; - uint8_t *buf_multi; - uint32_t ite1; - uint32_t ite2; - uint64_t prev_len_last; - uint32_t ite; - memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128)); - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite0 = (uint32_t)16U; - } - else - { - ite0 = r % (uint32_t)16U; - } - buf_last = buf_1 + r - ite0; - buf_multi = buf_1; - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite1 = (uint32_t)16U; - } - else - { - ite1 = r % (uint32_t)16U; - } - Hacl_Poly1305_128_poly1305_update(tmp_block_state, r - ite1, buf_multi); - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite2 = (uint32_t)16U; - } - else - { - ite2 = r % (uint32_t)16U; - } - prev_len_last = total_len - (uint64_t)ite2; - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)16U; - } - else - { - ite = r % (uint32_t)16U; - } - Hacl_Poly1305_128_poly1305_update(tmp_block_state, ite, buf_last); - { - KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 tmp[25U] KRML_POST_ALIGN(16) = { 0U }; - memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128)); - Hacl_Poly1305_128_poly1305_finish(dst, k_, tmp); - } - } -} - -void Hacl_Streaming_Poly1305_128_free(Hacl_Streaming_Poly1305_128_poly1305_128_state *s) -{ - Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *s; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state; - KRML_HOST_FREE(k_); - KRML_ALIGNED_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_Poly1305_128.h b/dist/c89-compatible/Hacl_Streaming_Poly1305_128.h deleted file mode 100644 index bd453354c0..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_Poly1305_128.h +++ /dev/null @@ -1,75 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
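Context for the deleted code above and below: every streaming function recomputes how many bytes sit in the internal buffer with the same guarded modulo, under which an exactly full buffer counts as one whole block rather than zero, and finish works on a stack copy of the block state, so a caller may keep feeding update afterwards. A distilled sketch of the idiom, using a helper name that does not appear in the generated code:

    #include <stdint.h>

    /* Buffered-byte count used throughout these files: a full buffer
     * reports one whole block (block_len), never 0, so finish always
     * has a final block to run the last-block computation on. */
    static uint32_t buffered_bytes(uint64_t total_len, uint32_t block_len)
    {
      if (total_len % (uint64_t)block_len == (uint64_t)0U && total_len > (uint64_t)0U)
        return block_len;
      return (uint32_t)(total_len % (uint64_t)block_len);
    }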
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Streaming_Poly1305_128_H
-#define __Hacl_Streaming_Poly1305_128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Poly1305_128.h"
-#include "evercrypt_targetconfig.h"
-typedef struct Hacl_Streaming_Poly1305_128_poly1305_128_state_s
-{
-  Lib_IntVector_Intrinsics_vec128 *block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-  uint8_t *p_key;
-}
-Hacl_Streaming_Poly1305_128_poly1305_128_state;
-
-Hacl_Streaming_Poly1305_128_poly1305_128_state
-*Hacl_Streaming_Poly1305_128_create_in(uint8_t *k);
-
-void
-Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly1305_128_state *s);
-
-void
-Hacl_Streaming_Poly1305_128_update(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-void
-Hacl_Streaming_Poly1305_128_finish(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *dst
-);
-
-void Hacl_Streaming_Poly1305_128_free(Hacl_Streaming_Poly1305_128_poly1305_128_state *s);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Poly1305_128_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_Poly1305_256.c b/dist/c89-compatible/Hacl_Streaming_Poly1305_256.c
deleted file mode 100644
index 4b6b4965a9..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_Poly1305_256.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
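For reference, the Poly1305_128 streaming API removed above followed the create_in, update, finish, free lifecycle. A hypothetical caller, with made-up names for the key, message fragments, and tag buffer (the 32-byte key and 16-byte tag sizes are Poly1305's):

    #include "Hacl_Streaming_Poly1305_128.h"

    /* Hypothetical caller: MAC a message delivered in two fragments. */
    void mac_two_fragments(uint8_t key[32], uint8_t *m1, uint32_t l1,
                           uint8_t *m2, uint32_t l2, uint8_t tag[16])
    {
      Hacl_Streaming_Poly1305_128_poly1305_128_state
      *st = Hacl_Streaming_Poly1305_128_create_in(key);
      Hacl_Streaming_Poly1305_128_update(st, m1, l1);
      Hacl_Streaming_Poly1305_128_update(st, m2, l2);
      Hacl_Streaming_Poly1305_128_finish(st, tag); /* 16-byte Poly1305 tag */
      Hacl_Streaming_Poly1305_128_free(st);
    }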
- */ - - -#include "Hacl_Streaming_Poly1305_256.h" - - - -Hacl_Streaming_Poly1305_256_poly1305_256_state -*Hacl_Streaming_Poly1305_256_create_in(uint8_t *k) -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - Lib_IntVector_Intrinsics_vec256 - *r1 = - (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32, - sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)25U); - memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - { - Lib_IntVector_Intrinsics_vec256 *block_state = r1; - uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - uint8_t *k_0; - memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); - k_0 = k_; - { - Hacl_Streaming_Poly1305_256_poly1305_256_state s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - s.p_key = k_0; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Poly1305_256_poly1305_256_state), (uint32_t)1U); - { - Hacl_Streaming_Poly1305_256_poly1305_256_state - *p = - (Hacl_Streaming_Poly1305_256_poly1305_256_state *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Poly1305_256_poly1305_256_state - )); - p[0U] = s; - Hacl_Poly1305_256_poly1305_init(block_state, k); - return p; - } - } - } -} - -void -Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly1305_256_state *s) -{ - Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *s; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state; - uint8_t *k_1; - Hacl_Poly1305_256_poly1305_init(block_state, k); - memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); - k_1 = k_; - { - Hacl_Streaming_Poly1305_256_poly1305_256_state lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - lit.p_key = k_1; - s[0U] = lit; - } -} - -void -Hacl_Streaming_Poly1305_256_update( - Hacl_Streaming_Poly1305_256_poly1305_256_state *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Poly1305_256_poly1305_256_state s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)64U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - if (len <= (uint32_t)64U - sz) - { - Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p; - Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_Poly1305_256_poly1305_256_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - lit.p_key = k_1; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p; - Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t 
data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf); - } - if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Poly1305_256_poly1305_update(block_state1, data1_len, data1); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Poly1305_256_poly1305_256_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - lit.p_key = k_1; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)64U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Poly1305_256_poly1305_256_state s10 = *p; - Lib_IntVector_Intrinsics_vec256 *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint8_t *k_1 = s10.p_key; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)64U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_Poly1305_256_poly1305_256_state lit; - Hacl_Streaming_Poly1305_256_poly1305_256_state s1; - Lib_IntVector_Intrinsics_vec256 *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint8_t *k_10; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - lit.p_key = k_1; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - k_10 = s1.p_key; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)64U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - diff - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Poly1305_256_poly1305_update(block_state1, data1_len, data11); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Poly1305_256_poly1305_256_state lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - lit0.p_key = k_10; - *p = lit0; - } - } - } - } -} - -void -Hacl_Streaming_Poly1305_256_finish( - Hacl_Streaming_Poly1305_256_poly1305_256_state *p, - uint8_t *dst -) -{ - Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *p; - Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = 
scrut.total_len; - uint8_t *k_ = scrut.p_key; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)64U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf_1 = buf_; - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U }; - Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1; - uint32_t ite0; - uint8_t *buf_last; - uint8_t *buf_multi; - uint32_t ite1; - uint32_t ite2; - uint64_t prev_len_last; - uint32_t ite; - memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite0 = (uint32_t)16U; - } - else - { - ite0 = r % (uint32_t)16U; - } - buf_last = buf_1 + r - ite0; - buf_multi = buf_1; - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite1 = (uint32_t)16U; - } - else - { - ite1 = r % (uint32_t)16U; - } - Hacl_Poly1305_256_poly1305_update(tmp_block_state, r - ite1, buf_multi); - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite2 = (uint32_t)16U; - } - else - { - ite2 = r % (uint32_t)16U; - } - prev_len_last = total_len - (uint64_t)ite2; - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)16U; - } - else - { - ite = r % (uint32_t)16U; - } - Hacl_Poly1305_256_poly1305_update(tmp_block_state, ite, buf_last); - { - KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 tmp[25U] KRML_POST_ALIGN(32) = { 0U }; - memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256)); - Hacl_Poly1305_256_poly1305_finish(dst, k_, tmp); - } - } -} - -void Hacl_Streaming_Poly1305_256_free(Hacl_Streaming_Poly1305_256_poly1305_256_state *s) -{ - Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *s; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state; - KRML_HOST_FREE(k_); - KRML_ALIGNED_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_Poly1305_256.h b/dist/c89-compatible/Hacl_Streaming_Poly1305_256.h deleted file mode 100644 index c25f9a838f..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_Poly1305_256.h +++ /dev/null @@ -1,75 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
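Note the allocator pairing in the file above: the vec256 block state comes from KRML_ALIGNED_MALLOC(32, ...) and must go back through KRML_ALIGNED_FREE, because Lib_IntVector_Intrinsics_vec256 values require 32-byte alignment, whereas buf and the key copy use the plain KRML_HOST_CALLOC/KRML_HOST_FREE pair. On a C11 hosted platform, one possible mapping for the aligned macros is the following sketch; it is not krmllib's actual, platform-dependent definition:

    #include <stdlib.h>

    /* C11 sketch: aligned_alloc requires the size to be a multiple of
     * the alignment, which holds here (25 * sizeof(vec256) = 800, a
     * multiple of 32). */
    #define KRML_ALIGNED_MALLOC(align, sz) aligned_alloc((align), (sz))
    #define KRML_ALIGNED_FREE(ptr)         free(ptr)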
- */
-
-
-#ifndef __Hacl_Streaming_Poly1305_256_H
-#define __Hacl_Streaming_Poly1305_256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Poly1305_256.h"
-#include "evercrypt_targetconfig.h"
-typedef struct Hacl_Streaming_Poly1305_256_poly1305_256_state_s
-{
-  Lib_IntVector_Intrinsics_vec256 *block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-  uint8_t *p_key;
-}
-Hacl_Streaming_Poly1305_256_poly1305_256_state;
-
-Hacl_Streaming_Poly1305_256_poly1305_256_state
-*Hacl_Streaming_Poly1305_256_create_in(uint8_t *k);
-
-void
-Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly1305_256_state *s);
-
-void
-Hacl_Streaming_Poly1305_256_update(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-void
-Hacl_Streaming_Poly1305_256_finish(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *dst
-);
-
-void Hacl_Streaming_Poly1305_256_free(Hacl_Streaming_Poly1305_256_poly1305_256_state *s);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Poly1305_256_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_Poly1305_32.c b/dist/c89-compatible/Hacl_Streaming_Poly1305_32.c
deleted file mode 100644
index bf1cfcfa62..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_Poly1305_32.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
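The update functions in these files all split incoming data the same way: flush the buffer if it is full, hash as many whole blocks as possible directly from the input, then copy the remainder into the buffer. Spelled out on concrete numbers (mine) for the 64-byte block size used by the Poly1305_256 code:

    #include <stdint.h>

    /* Worked instance of the ite/n_blocks split with len = 150:
     * 128 bytes are hashed immediately, 22 bytes are buffered. Were
     * len an exact multiple of 64, ite would be 64, keeping one full
     * block in the buffer for finish to consume. */
    void split_example(void)
    {
      uint32_t len = 150U;
      uint32_t ite = (len % 64U == 0U && len > 0U) ? 64U : len % 64U; /* 22  */
      uint32_t n_blocks = (len - ite) / 64U;                          /* 2   */
      uint32_t data1_len = n_blocks * 64U;                            /* 128 */
      uint32_t data2_len = len - data1_len;                           /* 22  */
      (void)data1_len; (void)data2_len;
    }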
- */ - - -#include "Hacl_Streaming_Poly1305_32.h" - - - -Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k) -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint8_t)); - uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t)); - uint64_t *block_state = r1; - uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t)); - uint8_t *k_0; - memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); - k_0 = k_; - { - Hacl_Streaming_Poly1305_32_poly1305_32_state s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - s.p_key = k_0; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_Poly1305_32_poly1305_32_state), (uint32_t)1U); - { - Hacl_Streaming_Poly1305_32_poly1305_32_state - *p = - (Hacl_Streaming_Poly1305_32_poly1305_32_state *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_Poly1305_32_poly1305_32_state - )); - p[0U] = s; - Hacl_Poly1305_32_poly1305_init(block_state, k); - return p; - } - } -} - -void -Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_32_state *s) -{ - Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *s; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - uint8_t *k_1; - Hacl_Poly1305_32_poly1305_init(block_state, k); - memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); - k_1 = k_; - { - Hacl_Streaming_Poly1305_32_poly1305_32_state lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - lit.p_key = k_1; - s[0U] = lit; - } -} - -void -Hacl_Streaming_Poly1305_32_update( - Hacl_Streaming_Poly1305_32_poly1305_32_state *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_Poly1305_32_poly1305_32_state s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)16U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)16U); - } - if (len <= (uint32_t)16U - sz) - { - Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)16U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_Poly1305_32_poly1305_32_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - lit.p_key = k_1; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint8_t *k_1 = s1.p_key; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)16U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf); - } - if ((uint64_t)len % (uint64_t)(uint32_t)16U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - 
ite = (uint32_t)16U; - } - else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)16U); - } - n_blocks = (len - ite) / (uint32_t)16U; - data1_len = n_blocks * (uint32_t)16U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Poly1305_32_poly1305_update(block_state1, data1_len, data1); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Poly1305_32_poly1305_32_state lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - lit.p_key = k_1; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)16U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_Poly1305_32_poly1305_32_state s10 = *p; - uint64_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint8_t *k_1 = s10.p_key; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)16U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)16U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_Poly1305_32_poly1305_32_state lit; - Hacl_Streaming_Poly1305_32_poly1305_32_state s1; - uint64_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint8_t *k_10; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - lit.p_key = k_1; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - k_10 = s1.p_key; - if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)16U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)16U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)16U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)16U); - } - n_blocks = (len - diff - ite) / (uint32_t)16U; - data1_len = n_blocks * (uint32_t)16U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Poly1305_32_poly1305_update(block_state1, data1_len, data11); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_Poly1305_32_poly1305_32_state lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - lit0.p_key = k_10; - *p = lit0; - } - } - } - } -} - -void -Hacl_Streaming_Poly1305_32_finish( - Hacl_Streaming_Poly1305_32_poly1305_32_state *p, - uint8_t *dst -) -{ - Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *p; - uint64_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint8_t *k_ = scrut.p_key; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)16U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)16U); - } - { - uint8_t *buf_1 = buf_; - uint64_t r1[25U] = { 0U }; - uint64_t *tmp_block_state = r1; - uint32_t ite; - uint8_t *buf_last; - uint8_t 
*buf_multi; - memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (uint64_t)); - if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)16U; - } - else - { - ite = r % (uint32_t)16U; - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - Hacl_Poly1305_32_poly1305_update(tmp_block_state, (uint32_t)0U, buf_multi); - Hacl_Poly1305_32_poly1305_update(tmp_block_state, r, buf_last); - { - uint64_t tmp[25U] = { 0U }; - memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (uint64_t)); - Hacl_Poly1305_32_poly1305_finish(dst, k_, tmp); - } - } -} - -void Hacl_Streaming_Poly1305_32_free(Hacl_Streaming_Poly1305_32_poly1305_32_state *s) -{ - Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *s; - uint8_t *k_ = scrut.p_key; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - KRML_HOST_FREE(k_); - KRML_HOST_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_Poly1305_32.h b/dist/c89-compatible/Hacl_Streaming_Poly1305_32.h deleted file mode 100644 index 1b5a6c5250..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_Poly1305_32.h +++ /dev/null @@ -1,74 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
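Unlike the vectorized variants, the portable Poly1305_32 state above is a plain uint64_t array from KRML_HOST_CALLOC, so free releases everything with KRML_HOST_FREE and no aligned allocator is involved. Since init re-keys the state and resets total_len, one allocation can serve several messages; a hypothetical reuse pattern (names mine):

    #include "Hacl_Streaming_Poly1305_32.h"

    /* Hypothetical: MAC two messages with one heap-allocated state. */
    void two_macs(uint8_t k1[32], uint8_t k2[32],
                  uint8_t *m1, uint32_t l1, uint8_t *m2, uint32_t l2,
                  uint8_t t1[16], uint8_t t2[16])
    {
      Hacl_Streaming_Poly1305_32_poly1305_32_state
      *st = Hacl_Streaming_Poly1305_32_create_in(k1);
      Hacl_Streaming_Poly1305_32_update(st, m1, l1);
      Hacl_Streaming_Poly1305_32_finish(st, t1);
      Hacl_Streaming_Poly1305_32_init(k2, st); /* re-key, total_len = 0 */
      Hacl_Streaming_Poly1305_32_update(st, m2, l2);
      Hacl_Streaming_Poly1305_32_finish(st, t2);
      Hacl_Streaming_Poly1305_32_free(st);
    }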
- */
-
-
-#ifndef __Hacl_Streaming_Poly1305_32_H
-#define __Hacl_Streaming_Poly1305_32_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Poly1305_32.h"
-#include "evercrypt_targetconfig.h"
-typedef struct Hacl_Streaming_Poly1305_32_poly1305_32_state_s
-{
-  uint64_t *block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-  uint8_t *p_key;
-}
-Hacl_Streaming_Poly1305_32_poly1305_32_state;
-
-Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k);
-
-void
-Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_32_state *s);
-
-void
-Hacl_Streaming_Poly1305_32_update(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-void
-Hacl_Streaming_Poly1305_32_finish(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *dst
-);
-
-void Hacl_Streaming_Poly1305_32_free(Hacl_Streaming_Poly1305_32_poly1305_32_state *s);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Poly1305_32_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_SHA1.c b/dist/c89-compatible/Hacl_Streaming_SHA1.c
deleted file mode 100644
index 0823c114cc..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_SHA1.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
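The shape of all the deleted .c files is the C89 dialect this distribution existed for: every local is declared before the first statement of its block, so any value that only becomes available mid-function opens a fresh brace scope. A minimal illustration of the pattern (not taken from the files):

    /* C89: declarations must precede statements in each block, hence
     * the deep nesting wherever a local depends on a prior computation. */
    void c89_shape(void)
    {
      int a = 0;
      a = a + 1;
      {
        int b = a + 1; /* new scope keeps this declaration legal */
        (void)b;
      }
    }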
- */ - - -#include "Hacl_Streaming_SHA1.h" - -#include "internal/Hacl_Hash_SHA1.h" - -Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA1_legacy_create_in_sha1() -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t)); - Hacl_Streaming_SHA2_state_sha2_224 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_SHA2_state_sha2_224), (uint32_t)1U); - { - Hacl_Streaming_SHA2_state_sha2_224 - *p = - (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_SHA2_state_sha2_224 - )); - p[0U] = s; - Hacl_Hash_Core_SHA1_legacy_init(block_state); - return p; - } -} - -void Hacl_Streaming_SHA1_legacy_init_sha1(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; - Hacl_Hash_Core_SHA1_legacy_init(block_state); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -void -Hacl_Streaming_SHA1_legacy_update_sha1( - Hacl_Streaming_SHA2_state_sha2_224 *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_SHA2_state_sha2_224 s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)64U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - if (len <= (uint32_t)64U - sz) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U); - } - if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = 
total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)64U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_SHA2_state_sha2_224 s10 = *p; - uint32_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)64U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - Hacl_Streaming_SHA2_state_sha2_224 s1; - uint32_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)64U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - diff - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -void -Hacl_Streaming_SHA1_legacy_finish_sha1(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *p; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)64U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf_1 = buf_; - uint32_t tmp_block_state[5U] = { 0U }; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t)); - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = r % (uint32_t)64U; - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r); - Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst); - } -} - -void Hacl_Streaming_SHA1_legacy_free_sha1(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - 
uint32_t *block_state = scrut.block_state;
-  KRML_HOST_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
diff --git a/dist/c89-compatible/Hacl_Streaming_SHA1.h b/dist/c89-compatible/Hacl_Streaming_SHA1.h
deleted file mode 100644
index a274de6616..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_SHA1.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Streaming_SHA1_H
-#define __Hacl_Streaming_SHA1_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-
-#include "Hacl_Streaming_SHA2.h"
-#include "Hacl_Hash_SHA1.h"
-#include "evercrypt_targetconfig.h"
-typedef Hacl_Streaming_SHA2_state_sha2_224 Hacl_Streaming_SHA1_state_sha1;
-
-Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA1_legacy_create_in_sha1();
-
-void Hacl_Streaming_SHA1_legacy_init_sha1(Hacl_Streaming_SHA2_state_sha2_224 *s);
-
-void
-Hacl_Streaming_SHA1_legacy_update_sha1(
-  Hacl_Streaming_SHA2_state_sha2_224 *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-void
-Hacl_Streaming_SHA1_legacy_finish_sha1(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst);
-
-void Hacl_Streaming_SHA1_legacy_free_sha1(Hacl_Streaming_SHA2_state_sha2_224 *s);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_SHA1_H_DEFINED
-#endif
diff --git a/dist/c89-compatible/Hacl_Streaming_SHA2.c b/dist/c89-compatible/Hacl_Streaming_SHA2.c
deleted file mode 100644
index cda67e1f54..0000000000
--- a/dist/c89-compatible/Hacl_Streaming_SHA2.c
+++ /dev/null
@@ -1,1142 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
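The SHA-1 streaming API deleted above reuses Hacl_Streaming_SHA2_state_sha2_224 as its state type; the struct stores only pointers plus a length, so the same layout carries SHA-1's five-word state as readily as SHA-2's eight words. A hypothetical caller (names mine; 20 bytes is SHA-1's digest size):

    #include "Hacl_Streaming_SHA1.h"

    /* Hypothetical caller: incremental SHA-1 over one chunk. */
    void sha1_example(uint8_t *data, uint32_t len, uint8_t digest[20])
    {
      Hacl_Streaming_SHA1_state_sha1
      *st = Hacl_Streaming_SHA1_legacy_create_in_sha1();
      Hacl_Streaming_SHA1_legacy_update_sha1(st, data, len);
      Hacl_Streaming_SHA1_legacy_finish_sha1(st, digest);
      Hacl_Streaming_SHA1_legacy_free_sha1(st);
    }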
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Streaming_SHA2.h" - -#include "internal/Hacl_Hash_SHA2.h" - -Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_224() -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); - Hacl_Streaming_SHA2_state_sha2_224 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_SHA2_state_sha2_224), (uint32_t)1U); - { - Hacl_Streaming_SHA2_state_sha2_224 - *p = - (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_SHA2_state_sha2_224 - )); - p[0U] = s; - Hacl_Hash_Core_SHA2_init_224(block_state); - return p; - } -} - -void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; - Hacl_Hash_Core_SHA2_init_224(block_state); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -void -Hacl_Streaming_SHA2_update_224( - Hacl_Streaming_SHA2_state_sha2_224 *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_SHA2_state_sha2_224 s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)64U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - if (len <= (uint32_t)64U - sz) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_224(block_state1, buf, (uint32_t)1U); - } - if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = 
(uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Hash_SHA2_update_multi_224(block_state1, data1, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)64U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_SHA2_state_sha2_224 s10 = *p; - uint32_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)64U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - Hacl_Streaming_SHA2_state_sha2_224 s1; - uint32_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_224(block_state1, buf, (uint32_t)1U); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)64U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - diff - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Hash_SHA2_update_multi_224(block_state1, data11, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *p; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)64U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf_1 = buf_; - uint32_t tmp_block_state[8U] = { 0U }; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t)); - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = r % (uint32_t)64U; - } - buf_last = buf_1 + r - 
ite; - buf_multi = buf_1; - Hacl_Hash_SHA2_update_multi_224(tmp_block_state, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - Hacl_Hash_SHA2_update_last_224(tmp_block_state, prev_len_last, buf_last, r); - Hacl_Hash_Core_SHA2_finish_224(tmp_block_state, dst); - } -} - -void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; - KRML_HOST_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - -Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_256() -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); - uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); - Hacl_Streaming_SHA2_state_sha2_224 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_SHA2_state_sha2_224), (uint32_t)1U); - { - Hacl_Streaming_SHA2_state_sha2_224 - *p = - (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_SHA2_state_sha2_224 - )); - p[0U] = s; - Hacl_Hash_Core_SHA2_init_256(block_state); - return p; - } -} - -void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; - Hacl_Hash_Core_SHA2_init_256(block_state); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -void -Hacl_Streaming_SHA2_update_256( - Hacl_Streaming_SHA2_state_sha2_224 *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_SHA2_state_sha2_224 s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)64U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - if (len <= (uint32_t)64U - sz) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; - uint32_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_256(block_state1, buf, (uint32_t)1U); - } - if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)len % 
(uint64_t)(uint32_t)64U); - } - n_blocks = (len - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Hash_SHA2_update_multi_256(block_state1, data1, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)64U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_SHA2_state_sha2_224 s10 = *p; - uint32_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)64U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_SHA2_state_sha2_224 lit; - Hacl_Streaming_SHA2_state_sha2_224 s1; - uint32_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)64U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_256(block_state1, buf, (uint32_t)1U); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)64U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)64U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U); - } - n_blocks = (len - diff - ite) / (uint32_t)64U; - data1_len = n_blocks * (uint32_t)64U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Hash_SHA2_update_multi_256(block_state1, data11, data1_len / (uint32_t)64U); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_224 lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *p; - uint32_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)64U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); - } - { - uint8_t *buf_1 = buf_; - uint32_t tmp_block_state[8U] = { 0U }; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t)); - if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)64U; - } - else - { - ite = r % (uint32_t)64U; - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - 
Hacl_Hash_SHA2_update_multi_256(tmp_block_state, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - Hacl_Hash_SHA2_update_last_256(tmp_block_state, prev_len_last, buf_last, r); - Hacl_Hash_Core_SHA2_finish_256(tmp_block_state, dst); - } -} - -void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_SHA2_state_sha2_224 *s) -{ - Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; - uint8_t *buf = scrut.buf; - uint32_t *block_state = scrut.block_state; - KRML_HOST_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - -Hacl_Streaming_SHA2_state_sha2_384 *Hacl_Streaming_SHA2_create_in_384() -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t)); - uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t)); - Hacl_Streaming_SHA2_state_sha2_384 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_SHA2_state_sha2_384), (uint32_t)1U); - { - Hacl_Streaming_SHA2_state_sha2_384 - *p = - (Hacl_Streaming_SHA2_state_sha2_384 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_SHA2_state_sha2_384 - )); - p[0U] = s; - Hacl_Hash_Core_SHA2_init_384(block_state); - return p; - } -} - -void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_SHA2_state_sha2_384 *s) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *s; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - Hacl_Hash_Core_SHA2_init_384(block_state); - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -void -Hacl_Streaming_SHA2_update_384( - Hacl_Streaming_SHA2_state_sha2_384 *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_SHA2_state_sha2_384 s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)128U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U); - } - if (len <= (uint32_t)128U - sz) - { - Hacl_Streaming_SHA2_state_sha2_384 s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)128U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_SHA2_state_sha2_384 s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)128U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_384(block_state1, buf, (uint32_t)1U); - } - if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)128U; - } - else - { - ite = (uint32_t)((uint64_t)len % 
(uint64_t)(uint32_t)128U); - } - n_blocks = (len - ite) / (uint32_t)128U; - data1_len = n_blocks * (uint32_t)128U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Hash_SHA2_update_multi_384(block_state1, data1, data1_len / (uint32_t)128U); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)128U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_SHA2_state_sha2_384 s10 = *p; - uint64_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)128U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - Hacl_Streaming_SHA2_state_sha2_384 s1; - uint64_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)128U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_384(block_state1, buf, (uint32_t)1U); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)128U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)128U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U); - } - n_blocks = (len - diff - ite) / (uint32_t)128U; - data1_len = n_blocks * (uint32_t)128U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Hash_SHA2_update_multi_384(block_state1, data11, data1_len / (uint32_t)128U); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_384 lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_SHA2_state_sha2_384 *p, uint8_t *dst) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *p; - uint64_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)128U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U); - } - { - uint8_t *buf_1 = buf_; - uint64_t tmp_block_state[8U] = { 0U }; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t)); - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)128U; - } - else - { - ite = r % (uint32_t)128U; - } - buf_last = buf_1 + r - ite; - 
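
The finish_* functions, including the finish_384 in progress at this point, are non-destructive: the chaining state is copied into a temporary and only the copy absorbs the r buffered bytes plus padding, so a caller may read an intermediate digest and keep updating the same stream. A sketch of that shape, where process_last stands in for the algorithm-specific Hacl_Hash_*_update_last_* routine (hypothetical names, editorial only):

/* Finalize on a copy of the 8-lane state; prev_len is the block-aligned
   length of everything hashed before the r buffered bytes. */
#include <string.h>
#include <stdint.h>

typedef void (*process_last_t)(uint64_t st[8U], uint64_t prev_len,
                               const uint8_t *last, uint32_t last_len);

static void finish_on_a_copy(const uint64_t st[8U], const uint8_t *buf,
                             uint64_t total_len, uint32_t r,
                             process_last_t process_last, uint64_t tmp[8U])
{
  memcpy(tmp, st, (uint32_t)8U * sizeof (uint64_t));
  process_last(tmp, total_len - (uint64_t)r, buf, r);
}
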
buf_multi = buf_1; - Hacl_Hash_SHA2_update_multi_384(tmp_block_state, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - Hacl_Hash_SHA2_update_last_384(tmp_block_state, - FStar_UInt128_uint64_to_uint128(prev_len_last), - buf_last, - r); - Hacl_Hash_Core_SHA2_finish_384(tmp_block_state, dst); - } -} - -void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_SHA2_state_sha2_384 *s) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *s; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - KRML_HOST_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - -Hacl_Streaming_SHA2_state_sha2_384 *Hacl_Streaming_SHA2_create_in_512() -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t)); - uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t)); - Hacl_Streaming_SHA2_state_sha2_384 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_SHA2_state_sha2_384), (uint32_t)1U); - { - Hacl_Streaming_SHA2_state_sha2_384 - *p = - (Hacl_Streaming_SHA2_state_sha2_384 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_SHA2_state_sha2_384 - )); - p[0U] = s; - Hacl_Hash_Core_SHA2_init_512(block_state); - return p; - } -} - -void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_SHA2_state_sha2_384 *s) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *s; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - Hacl_Hash_Core_SHA2_init_512(block_state); - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -void -Hacl_Streaming_SHA2_update_512( - Hacl_Streaming_SHA2_state_sha2_384 *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_SHA2_state_sha2_384 s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)128U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U); - } - if (len <= (uint32_t)128U - sz) - { - Hacl_Streaming_SHA2_state_sha2_384 s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)128U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_SHA2_state_sha2_384 s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)128U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_512(block_state1, buf, (uint32_t)1U); - } - if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)128U; - } - 
else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U); - } - n_blocks = (len - ite) / (uint32_t)128U; - data1_len = n_blocks * (uint32_t)128U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - Hacl_Hash_SHA2_update_multi_512(block_state1, data1, data1_len / (uint32_t)128U); - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)128U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_SHA2_state_sha2_384 s10 = *p; - uint64_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)128U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - Hacl_Streaming_SHA2_state_sha2_384 s1; - uint64_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)128U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U); - } - if (!(sz1 == (uint32_t)0U)) - { - Hacl_Hash_SHA2_update_multi_512(block_state1, buf, (uint32_t)1U); - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)128U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)128U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U); - } - n_blocks = (len - diff - ite) / (uint32_t)128U; - data1_len = n_blocks * (uint32_t)128U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - Hacl_Hash_SHA2_update_multi_512(block_state1, data11, data1_len / (uint32_t)128U); - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_384 lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_SHA2_state_sha2_384 *p, uint8_t *dst) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *p; - uint64_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)128U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U); - } - { - uint8_t *buf_1 = buf_; - uint64_t tmp_block_state[8U] = { 0U }; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - uint64_t prev_len_last; - memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t)); - if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)128U; - } - else - { - ite = r % 
(uint32_t)128U; - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - Hacl_Hash_SHA2_update_multi_512(tmp_block_state, buf_multi, (uint32_t)0U); - prev_len_last = total_len - (uint64_t)r; - Hacl_Hash_SHA2_update_last_512(tmp_block_state, - FStar_UInt128_uint64_to_uint128(prev_len_last), - buf_last, - r); - Hacl_Hash_Core_SHA2_finish_512(tmp_block_state, dst); - } -} - -void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_SHA2_state_sha2_384 *s) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *s; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - KRML_HOST_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_SHA2.h b/dist/c89-compatible/Hacl_Streaming_SHA2.h deleted file mode 100644 index d2d37f4b8c..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_SHA2.h +++ /dev/null @@ -1,126 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
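
The definitions above complete the deleted Hacl_Streaming_SHA2.c; its public header follows. A hedged usage sketch of the SHA-512 half of that API, assuming the removed Hacl_Streaming_SHA2.h is still on an include path (the state type for 512 is a typedef of the sha2_384 struct, as the header below shows):

#include <stdio.h>
#include "Hacl_Streaming_SHA2.h"

int main(void)
{
  uint8_t msg[3U] = { 0x61U, 0x62U, 0x63U }; /* "abc" */
  uint8_t digest[64U];                       /* SHA-512 emits 64 bytes */
  uint32_t i;
  Hacl_Streaming_SHA2_state_sha2_512 *st = Hacl_Streaming_SHA2_create_in_512();
  Hacl_Streaming_SHA2_update_512(st, msg, (uint32_t)3U); /* may be called repeatedly */
  Hacl_Streaming_SHA2_finish_512(st, digest);            /* non-destructive */
  Hacl_Streaming_SHA2_free_512(st);
  for (i = (uint32_t)0U; i < (uint32_t)64U; i++)
  {
    printf("%02x", digest[i]);
  }
  printf("\n");
  return 0;
}
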
- */ - - -#ifndef __Hacl_Streaming_SHA2_H -#define __Hacl_Streaming_SHA2_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Hacl_Krmllib.h" -#include "Hacl_Hash_SHA2.h" -#include "evercrypt_targetconfig.h" -typedef struct Hacl_Streaming_SHA2_state_sha2_224_s -{ - uint32_t *block_state; - uint8_t *buf; - uint64_t total_len; -} -Hacl_Streaming_SHA2_state_sha2_224; - -typedef Hacl_Streaming_SHA2_state_sha2_224 Hacl_Streaming_SHA2_state_sha2_256; - -typedef struct Hacl_Streaming_SHA2_state_sha2_384_s -{ - uint64_t *block_state; - uint8_t *buf; - uint64_t total_len; -} -Hacl_Streaming_SHA2_state_sha2_384; - -typedef Hacl_Streaming_SHA2_state_sha2_384 Hacl_Streaming_SHA2_state_sha2_512; - -Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_224(); - -void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_SHA2_state_sha2_224 *s); - -void -Hacl_Streaming_SHA2_update_224( - Hacl_Streaming_SHA2_state_sha2_224 *p, - uint8_t *data, - uint32_t len -); - -void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst); - -void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_SHA2_state_sha2_224 *s); - -Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_256(); - -void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_SHA2_state_sha2_224 *s); - -void -Hacl_Streaming_SHA2_update_256( - Hacl_Streaming_SHA2_state_sha2_224 *p, - uint8_t *data, - uint32_t len -); - -void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst); - -void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_SHA2_state_sha2_224 *s); - -Hacl_Streaming_SHA2_state_sha2_384 *Hacl_Streaming_SHA2_create_in_384(); - -void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_SHA2_state_sha2_384 *s); - -void -Hacl_Streaming_SHA2_update_384( - Hacl_Streaming_SHA2_state_sha2_384 *p, - uint8_t *data, - uint32_t len -); - -void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_SHA2_state_sha2_384 *p, uint8_t *dst); - -void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_SHA2_state_sha2_384 *s); - -Hacl_Streaming_SHA2_state_sha2_384 *Hacl_Streaming_SHA2_create_in_512(); - -void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_SHA2_state_sha2_384 *s); - -void -Hacl_Streaming_SHA2_update_512( - Hacl_Streaming_SHA2_state_sha2_384 *p, - uint8_t *data, - uint32_t len -); - -void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_SHA2_state_sha2_384 *p, uint8_t *dst); - -void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_SHA2_state_sha2_384 *s); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Streaming_SHA2_H_DEFINED -#endif diff --git a/dist/c89-compatible/Hacl_Streaming_SHA3.c b/dist/c89-compatible/Hacl_Streaming_SHA3.c deleted file mode 100644 index 3552171e61..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_SHA3.c +++ /dev/null @@ -1,364 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * 
copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#include "Hacl_Streaming_SHA3.h" - - - -Hacl_Streaming_SHA2_state_sha2_384 *Hacl_Streaming_SHA3_create_in_256() -{ - uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)136U, sizeof (uint8_t)); - uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t)); - Hacl_Streaming_SHA2_state_sha2_384 s; - s.block_state = block_state; - s.buf = buf; - s.total_len = (uint64_t)0U; - KRML_CHECK_SIZE(sizeof (Hacl_Streaming_SHA2_state_sha2_384), (uint32_t)1U); - { - Hacl_Streaming_SHA2_state_sha2_384 - *p = - (Hacl_Streaming_SHA2_state_sha2_384 *)KRML_HOST_MALLOC(sizeof ( - Hacl_Streaming_SHA2_state_sha2_384 - )); - p[0U] = s; - memset(block_state, 0U, (uint32_t)25U * sizeof (uint64_t)); - return p; - } -} - -void Hacl_Streaming_SHA3_init_256(Hacl_Streaming_SHA2_state_sha2_384 *s) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *s; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - memset(block_state, 0U, (uint32_t)25U * sizeof (uint64_t)); - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state; - lit.buf = buf; - lit.total_len = (uint64_t)0U; - s[0U] = lit; - } -} - -void -Hacl_Streaming_SHA3_update_256( - Hacl_Streaming_SHA2_state_sha2_384 *p, - uint8_t *data, - uint32_t len -) -{ - Hacl_Streaming_SHA2_state_sha2_384 s = *p; - uint64_t total_len = s.total_len; - uint32_t sz; - if (total_len % (uint64_t)(uint32_t)136U == (uint64_t)0U && total_len > (uint64_t)0U) - { - sz = (uint32_t)136U; - } - else - { - sz = (uint32_t)(total_len % (uint64_t)(uint32_t)136U); - } - if (len <= (uint32_t)136U - sz) - { - Hacl_Streaming_SHA2_state_sha2_384 s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)136U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)136U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)136U); - } - { - uint8_t *buf2 = buf + sz1; - uint64_t total_len2; - memcpy(buf2, data, len * sizeof (uint8_t)); - total_len2 = total_len1 + (uint64_t)len; - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len2; - *p = lit; - return; - } - } - } - if (sz == (uint32_t)0U) - { - Hacl_Streaming_SHA2_state_sha2_384 s1 = *p; - uint64_t *block_state1 = s1.block_state; - uint8_t *buf = s1.buf; - uint64_t total_len1 = s1.total_len; - uint32_t sz1; - if (total_len1 % (uint64_t)(uint32_t)136U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)136U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)136U); - } - { - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data1; - uint8_t *data2; - uint8_t *dst; - if (!(sz1 == (uint32_t)0U)) - { - { - uint32_t sz2 = (uint32_t)136U; - uint8_t *block = buf + sz2 * (uint32_t)0U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, block, block_state1); - 
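
The 136U that pervades the SHA3 code here is the SHA3-256 rate: each absorbed block is XORed into the 25-lane Keccak state by Hacl_Impl_SHA3_loadState and followed by a Keccak-f[1600] permutation, exactly the loadState/state_permute pairs visible in this update function. The constant itself is just capacity arithmetic (standard FIPS 202 parameters, checked in a trivial standalone program):

/* SHA3-256: capacity = 2 * 256 bits, rate = 1600 - capacity bits. */
#include <assert.h>

int main(void)
{
  assert((1600 - 2 * 256) / 8 == 136); /* rate in bytes */
  assert(25 * 64 == 1600);             /* 25 64-bit lanes in the state */
  return 0;
}
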
Hacl_Impl_SHA3_state_permute(block_state1); - } - } - if ((uint64_t)len % (uint64_t)(uint32_t)136U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) - { - ite = (uint32_t)136U; - } - else - { - ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)136U); - } - n_blocks = (len - ite) / (uint32_t)136U; - data1_len = n_blocks * (uint32_t)136U; - data2_len = len - data1_len; - data1 = data; - data2 = data + data1_len; - { - uint32_t i; - for (i = (uint32_t)0U; i < data1_len / (uint32_t)136U; i++) - { - uint32_t sz2 = (uint32_t)136U; - uint8_t *block = data1 + sz2 * i; - Hacl_Impl_SHA3_loadState((uint32_t)136U, block, block_state1); - Hacl_Impl_SHA3_state_permute(block_state1); - } - } - dst = buf; - memcpy(dst, data2, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - lit.block_state = block_state1; - lit.buf = buf; - lit.total_len = total_len1 + (uint64_t)len; - *p = lit; - return; - } - } - } - { - uint32_t diff = (uint32_t)136U - sz; - uint8_t *data1 = data; - uint8_t *data2 = data + diff; - Hacl_Streaming_SHA2_state_sha2_384 s10 = *p; - uint64_t *block_state10 = s10.block_state; - uint8_t *buf0 = s10.buf; - uint64_t total_len10 = s10.total_len; - uint32_t sz10; - if (total_len10 % (uint64_t)(uint32_t)136U == (uint64_t)0U && total_len10 > (uint64_t)0U) - { - sz10 = (uint32_t)136U; - } - else - { - sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)136U); - } - { - uint8_t *buf2 = buf0 + sz10; - uint64_t total_len2; - memcpy(buf2, data1, diff * sizeof (uint8_t)); - total_len2 = total_len10 + (uint64_t)diff; - { - Hacl_Streaming_SHA2_state_sha2_384 lit; - Hacl_Streaming_SHA2_state_sha2_384 s1; - uint64_t *block_state1; - uint8_t *buf; - uint64_t total_len1; - uint32_t sz1; - uint32_t ite; - uint32_t n_blocks; - uint32_t data1_len; - uint32_t data2_len; - uint8_t *data11; - uint8_t *data21; - uint8_t *dst; - lit.block_state = block_state10; - lit.buf = buf0; - lit.total_len = total_len2; - *p = lit; - s1 = *p; - block_state1 = s1.block_state; - buf = s1.buf; - total_len1 = s1.total_len; - if (total_len1 % (uint64_t)(uint32_t)136U == (uint64_t)0U && total_len1 > (uint64_t)0U) - { - sz1 = (uint32_t)136U; - } - else - { - sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)136U); - } - if (!(sz1 == (uint32_t)0U)) - { - { - uint32_t sz2 = (uint32_t)136U; - uint8_t *block = buf + sz2 * (uint32_t)0U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, block, block_state1); - Hacl_Impl_SHA3_state_permute(block_state1); - } - } - if - ( - (uint64_t)(len - diff) - % (uint64_t)(uint32_t)136U - == (uint64_t)0U - && (uint64_t)(len - diff) > (uint64_t)0U - ) - { - ite = (uint32_t)136U; - } - else - { - ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)136U); - } - n_blocks = (len - diff - ite) / (uint32_t)136U; - data1_len = n_blocks * (uint32_t)136U; - data2_len = len - diff - data1_len; - data11 = data2; - data21 = data2 + data1_len; - { - uint32_t i; - for (i = (uint32_t)0U; i < data1_len / (uint32_t)136U; i++) - { - uint32_t sz2 = (uint32_t)136U; - uint8_t *block = data11 + sz2 * i; - Hacl_Impl_SHA3_loadState((uint32_t)136U, block, block_state1); - Hacl_Impl_SHA3_state_permute(block_state1); - } - } - dst = buf; - memcpy(dst, data21, data2_len * sizeof (uint8_t)); - { - Hacl_Streaming_SHA2_state_sha2_384 lit0; - lit0.block_state = block_state1; - lit0.buf = buf; - lit0.total_len = total_len1 + (uint64_t)(len - diff); - *p = lit0; - } - } - } - } -} - -void Hacl_Streaming_SHA3_finish_256(Hacl_Streaming_SHA2_state_sha2_384 *p, uint8_t *dst) -{ - 
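
The finish body below implements SHA3's pad10*1 with domain separation: the byte after the residual data receives 0x06 (the "01" SHA-3 suffix followed by the first padding bit) and the last byte of the rate receives 0x80 (the final padding bit); because loadState XORs into the state, the code applies the two bytes via two separate zero-filled buffers. A compacted single-buffer equivalent, as an editorial sketch rather than the code's exact two-buffer formulation:

/* Build the final padded block for SHA3-256 (rate 136) from the r < 136
   residual bytes. When r == 135 the two pad bytes share a slot, hence
   the OR: 0x06 ^ 0x80 == 0x86. */
#include <string.h>
#include <stdint.h>

static void sha3_256_pad_block(uint8_t block[136U], const uint8_t *last, uint32_t r)
{
  memset(block, 0U, (uint32_t)136U * sizeof (uint8_t));
  memcpy(block, last, r * sizeof (uint8_t));
  block[r] = (uint8_t)0x06U;
  block[135U] = (uint8_t)(block[135U] | 0x80U);
}
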
Hacl_Streaming_SHA2_state_sha2_384 scrut = *p; - uint64_t *block_state = scrut.block_state; - uint8_t *buf_ = scrut.buf; - uint64_t total_len = scrut.total_len; - uint32_t r; - if (total_len % (uint64_t)(uint32_t)136U == (uint64_t)0U && total_len > (uint64_t)0U) - { - r = (uint32_t)136U; - } - else - { - r = (uint32_t)(total_len % (uint64_t)(uint32_t)136U); - } - { - uint8_t *buf_1 = buf_; - uint64_t tmp_block_state[25U] = { 0U }; - uint32_t ite; - uint8_t *buf_last; - uint8_t *buf_multi; - memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (uint64_t)); - if (r % (uint32_t)136U == (uint32_t)0U && r > (uint32_t)0U) - { - ite = (uint32_t)136U; - } - else - { - ite = r % (uint32_t)136U; - } - buf_last = buf_1 + r - ite; - buf_multi = buf_1; - if (r == (uint32_t)136U) - { - Hacl_Impl_SHA3_loadState((uint32_t)136U, buf_last, tmp_block_state); - Hacl_Impl_SHA3_state_permute(tmp_block_state); - { - uint8_t *uu____0 = buf_last + r; - uint8_t b[136U] = { 0U }; - memcpy(b, uu____0, (uint32_t)0U * sizeof (uint8_t)); - b[0U] = (uint8_t)0x06U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b, tmp_block_state); - { - uint8_t b1[136U] = { 0U }; - b1[135U] = (uint8_t)0x80U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b1, tmp_block_state); - Hacl_Impl_SHA3_state_permute(tmp_block_state); - Lib_Memzero0_memzero(b1, (uint32_t)136U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)136U * sizeof (b[0U])); - } - } - } - else - { - uint8_t b[136U] = { 0U }; - memcpy(b, buf_last, r * sizeof (uint8_t)); - b[r] = (uint8_t)0x06U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b, tmp_block_state); - { - uint8_t b1[136U] = { 0U }; - b1[135U] = (uint8_t)0x80U; - Hacl_Impl_SHA3_loadState((uint32_t)136U, b1, tmp_block_state); - Hacl_Impl_SHA3_state_permute(tmp_block_state); - Lib_Memzero0_memzero(b1, (uint32_t)136U * sizeof (b1[0U])); - Lib_Memzero0_memzero(b, (uint32_t)136U * sizeof (b[0U])); - } - } - Hacl_Impl_SHA3_squeeze(tmp_block_state, (uint32_t)136U, (uint32_t)32U, dst); - } -} - -void Hacl_Streaming_SHA3_free_256(Hacl_Streaming_SHA2_state_sha2_384 *s) -{ - Hacl_Streaming_SHA2_state_sha2_384 scrut = *s; - uint8_t *buf = scrut.buf; - uint64_t *block_state = scrut.block_state; - KRML_HOST_FREE(block_state); - KRML_HOST_FREE(buf); - KRML_HOST_FREE(s); -} - diff --git a/dist/c89-compatible/Hacl_Streaming_SHA3.h b/dist/c89-compatible/Hacl_Streaming_SHA3.h deleted file mode 100644 index 99119e6b26..0000000000 --- a/dist/c89-compatible/Hacl_Streaming_SHA3.h +++ /dev/null @@ -1,64 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Hacl_Streaming_SHA3_H -#define __Hacl_Streaming_SHA3_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "Lib_Memzero0.h" -#include "Hacl_Streaming_SHA2.h" -#include "Hacl_SHA3.h" -#include "evercrypt_targetconfig.h" -typedef Hacl_Streaming_SHA2_state_sha2_384 Hacl_Streaming_SHA3_state_sha3_256; - -Hacl_Streaming_SHA2_state_sha2_384 *Hacl_Streaming_SHA3_create_in_256(); - -void Hacl_Streaming_SHA3_init_256(Hacl_Streaming_SHA2_state_sha2_384 *s); - -void -Hacl_Streaming_SHA3_update_256( - Hacl_Streaming_SHA2_state_sha2_384 *p, - uint8_t *data, - uint32_t len -); - -void Hacl_Streaming_SHA3_finish_256(Hacl_Streaming_SHA2_state_sha2_384 *p, uint8_t *dst); - -void Hacl_Streaming_SHA3_free_256(Hacl_Streaming_SHA2_state_sha2_384 *s); - -#if defined(__cplusplus) -} -#endif - -#define __Hacl_Streaming_SHA3_H_DEFINED -#endif diff --git a/dist/c89-compatible/INFO.txt b/dist/c89-compatible/INFO.txt deleted file mode 100644 index 0aa505e03b..0000000000 --- a/dist/c89-compatible/INFO.txt +++ /dev/null @@ -1,4 +0,0 @@ -This code was generated with the following toolchain. -F* version: 20e3eede1fc3aeb4b5828c3661d4991161d2b03d -Karamel version: e0a41f4e120e4f3e1c08090856a05f3e208edf29 -Vale version: 0.3.19 diff --git a/dist/c89-compatible/Lib_Memzero0.c b/dist/c89-compatible/Lib_Memzero0.c deleted file mode 100644 index 8e2253186c..0000000000 --- a/dist/c89-compatible/Lib_Memzero0.c +++ /dev/null @@ -1,53 +0,0 @@ -#if defined(__has_include) -#if __has_include("config.h") -#include "config.h" -#endif -#endif - -#ifdef _WIN32 -#include -#endif - -#if (defined(__APPLE__) && defined(__MACH__)) || defined(__linux__) -#define __STDC_WANT_LIB_EXT1__ 1 -#include -#endif - -#ifdef __FreeBSD__ -#include -#endif - -#include -#include -#include -#include - -#include "Lib_Memzero0.h" -#include "krml/internal/target.h" - -/* The F* formalization talks about the number of elements in the array. The C - implementation wants a number of bytes in the array. KaRaMeL is aware of this - and inserts a sizeof multiplication. */ -void Lib_Memzero0_memzero(void *dst, uint64_t len) { - /* This is safe: karamel checks at run-time (if needed) that all object sizes - fit within a size_t, so the size we receive has been checked at - allocation-time, possibly via KRML_CHECK_SIZE, to fit in a size_t. */ - size_t len_ = (size_t) len; - - #ifdef _WIN32 - SecureZeroMemory(dst, len); - #elif defined(__APPLE__) && defined(__MACH__) - memset_s(dst, len_, 0, len_); - #elif (defined(__linux__) && !defined(LINUX_NO_EXPLICIT_BZERO)) || defined(__FreeBSD__) - explicit_bzero(dst, len_); - #elif defined(__NetBSD__) - explicit_memset(dst, 0, len_); - #else - /* Default implementation for platforms with no particular support. */ - #warning "Your platform does not support any safe implementation of memzero -- consider a pull request!" 
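
The branch that follows the #warning in Lib_Memzero0.c is the classic portable wipe; the platform branches above it prefer SecureZeroMemory, memset_s, explicit_bzero, or explicit_memset. A standalone sketch of just that fallback and why it survives optimization:

/* Last-resort zeroization: the volatile qualifications prevent the
   compiler from treating the stores as dead (as it legally may with a
   plain memset on memory about to be freed) and eliding the wipe. */
#include <stddef.h>

static void wipe_fallback(void *dst, size_t len)
{
  volatile unsigned char *volatile p = (volatile unsigned char *volatile)dst;
  size_t i = (size_t)0U;
  while (i < len)
  {
    p[i++] = 0U;
  }
}
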
- volatile unsigned char *volatile dst_ = (volatile unsigned char *volatile) dst; - size_t i = 0U; - while (i < len) - dst_[i++] = 0U; - #endif -} diff --git a/dist/c89-compatible/Lib_Memzero0.h b/dist/c89-compatible/Lib_Memzero0.h deleted file mode 100644 index 1ee6120a94..0000000000 --- a/dist/c89-compatible/Lib_Memzero0.h +++ /dev/null @@ -1,47 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Lib_Memzero0_H -#define __Lib_Memzero0_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -extern void Lib_Memzero0_memzero(void *x0, uint64_t x1); - -#if defined(__cplusplus) -} -#endif - -#define __Lib_Memzero0_H_DEFINED -#endif diff --git a/dist/c89-compatible/Lib_PrintBuffer.c b/dist/c89-compatible/Lib_PrintBuffer.c deleted file mode 100644 index 1b9c5021be..0000000000 --- a/dist/c89-compatible/Lib_PrintBuffer.c +++ /dev/null @@ -1,56 +0,0 @@ -#include -#include -#include -#include - -void Lib_PrintBuffer_print_bytes(uint32_t len, uint8_t* buffer) { - uint32_t i; - for (i = 0; i < len; i++){ - printf("%02x ", buffer[i]); - } - printf("\n"); -} - -void Lib_PrintBuffer_print_compare(uint32_t len, uint8_t* buffer1, uint8_t* buffer2) { - uint32_t i; - for (i = 0; i < len; i++){ - printf("%02x ", buffer1[i]); - } - printf("\n"); - for (i = 0; i < len; i++){ - printf("%02x ", buffer2[i]); - } - printf("\n"); -} - -void Lib_PrintBuffer_print_compare_display(uint32_t len, const uint8_t* buffer1, const uint8_t* buffer2) { - uint8_t res = 0; - uint32_t i; - Lib_PrintBuffer_print_compare(len, (uint8_t*)buffer1, (uint8_t*)buffer2); - for (i = 0; i < len; i++) { - res |= buffer1[i] ^ buffer2[i]; - } - if (res == 0) { - printf("Success !\n"); - } else { - printf("Failure !\n"); - } - printf("\n"); -} - -bool Lib_PrintBuffer_result_compare_display(uint32_t len, const uint8_t* buffer1, const uint8_t* buffer2) { - uint8_t res = 0; - uint32_t i; - Lib_PrintBuffer_print_compare(len, (uint8_t*)buffer1, (uint8_t*)buffer2); - for (i = 0; i < len; i++) { - res |= buffer1[i] ^ buffer2[i]; - } - if (res == 0) { - printf("Success !\n\n"); - return true; - } else { - printf("Failure !\n\n"); - return false; - } -} - diff --git a/dist/c89-compatible/Lib_PrintBuffer.h b/dist/c89-compatible/Lib_PrintBuffer.h deleted file mode 
100644 index cccd152c12..0000000000 --- a/dist/c89-compatible/Lib_PrintBuffer.h +++ /dev/null @@ -1,55 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Lib_PrintBuffer_H -#define __Lib_PrintBuffer_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -extern void Lib_PrintBuffer_print_bytes(uint32_t len, uint8_t *buf); - -extern void Lib_PrintBuffer_print_compare(uint32_t len, uint8_t *buf0, uint8_t *buf1); - -extern void -Lib_PrintBuffer_print_compare_display(uint32_t len, const uint8_t *buf0, const uint8_t *buf1); - -extern bool -Lib_PrintBuffer_result_compare_display(uint32_t len, const uint8_t *buf0, const uint8_t *buf1); - -#if defined(__cplusplus) -} -#endif - -#define __Lib_PrintBuffer_H_DEFINED -#endif diff --git a/dist/c89-compatible/Lib_RandomBuffer_System.c b/dist/c89-compatible/Lib_RandomBuffer_System.c deleted file mode 100644 index 0d7924b414..0000000000 --- a/dist/c89-compatible/Lib_RandomBuffer_System.c +++ /dev/null @@ -1,62 +0,0 @@ -#include "Lib_RandomBuffer_System.h" - -#if (defined(_WIN32) || defined(_WIN64)) - -#include -#include -#include -#include - -bool read_random_bytes(uint32_t len, uint8_t *buf) { - HCRYPTPROV ctxt; - if (!(CryptAcquireContext(&ctxt, NULL, NULL, PROV_RSA_FULL, - CRYPT_VERIFYCONTEXT))) { - DWORD error = GetLastError(); - /* printf("Cannot acquire crypto context: 0x%lx\n", error); */ - return false; - } - bool pass = true; - if (!(CryptGenRandom(ctxt, (uint64_t)len, buf))) { - /* printf("Cannot read random bytes\n"); */ - pass = false; - } - CryptReleaseContext(ctxt, 0); - return pass; -} - -#else - -/* assume POSIX here */ -#include -#include -#include -#include -#include - -bool read_random_bytes(uint32_t len, uint8_t *buf) { -#ifdef SYS_getrandom - ssize_t res = syscall(SYS_getrandom, buf, (size_t)len, 0); - if (res == -1) { - return false; - } -#else // !defined(SYS_getrandom) - int fd = open("/dev/urandom", O_RDONLY); - if (fd == -1) { - return false; - } - ssize_t res = read(fd, buf, (uint64_t)len); - close(fd); -#endif // defined(SYS_getrandom) - return ((size_t)res == (size_t)len); -} - -#endif - -// WARNING: this function is deprecated -bool Lib_RandomBuffer_System_randombytes(uint8_t *x, uint32_t len) { - return read_random_bytes(len, x); -} - 
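
read_random_bytes above returns false on failure (CryptGenRandom on Windows; the getrandom syscall with a /dev/urandom fallback elsewhere), and the crypto_random wrapper that follows simply retries until it succeeds. A hedged usage sketch, assuming the removed Lib_RandomBuffer_System.h is on an include path:

#include <stdio.h>
#include "Lib_RandomBuffer_System.h"

int main(void)
{
  uint8_t key[32U];
  uint32_t i;
  /* Cannot report failure: it loops internally until the OS RNG delivers. */
  Lib_RandomBuffer_System_crypto_random(key, (uint32_t)32U);
  for (i = (uint32_t)0U; i < (uint32_t)32U; i++)
  {
    printf("%02x", key[i]);
  }
  printf("\n");
  return 0;
}
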
-void Lib_RandomBuffer_System_crypto_random(uint8_t *x, uint32_t len) { - while(!read_random_bytes(len, x)) {} -} diff --git a/dist/c89-compatible/Lib_RandomBuffer_System.h b/dist/c89-compatible/Lib_RandomBuffer_System.h deleted file mode 100644 index 1c23ac13da..0000000000 --- a/dist/c89-compatible/Lib_RandomBuffer_System.h +++ /dev/null @@ -1,53 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __Lib_RandomBuffer_System_H -#define __Lib_RandomBuffer_System_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -KRML_DEPRECATED("random_crypto") - -extern bool Lib_RandomBuffer_System_randombytes(uint8_t *buf, uint32_t len); - -extern void *Lib_RandomBuffer_System_entropy_p; - -extern void Lib_RandomBuffer_System_crypto_random(uint8_t *buf, uint32_t len); - -#if defined(__cplusplus) -} -#endif - -#define __Lib_RandomBuffer_System_H_DEFINED -#endif diff --git a/dist/c89-compatible/Makefile b/dist/c89-compatible/Makefile deleted file mode 100644 index 3f7df5db33..0000000000 --- a/dist/c89-compatible/Makefile +++ /dev/null @@ -1,187 +0,0 @@ -# This is the universal Makefile that will build any distribution of EverCrypt. -# - It is copied from hacl-star/providers/dist/Makefile -# - It relies on the KaRaMeL-generated Makefile.basic and Makefile.include -# -# This Makefile may (conservatively) link in some Vale assemblies that may end -# up un-needed in the final shared object. -# -# Additionally, this Makefile works out of the box on Linux, OSX and -# Cygwin/MinGW. 
-# -# The Makefile produces: -# - libevercrypt.so, a shared object where unused symbols have been removed -# - libevercrypt.a - -# By default, this Makefile relies on the local checkout of krmllib -KRML_HOME ?= ../karamel - -ifeq (,$(wildcard $(KRML_HOME)/include/krmllib.h)) - $(error Incorrect KRML_HOME) -endif - --include Makefile.config - -UNAME ?= $(shell uname) -MARCH ?= $(shell uname -m | sed 's/amd64/x86_64/') -ifeq ($(UNAME),Darwin) - VARIANT = -darwin - SO = so -else ifeq ($(UNAME),Linux) - CFLAGS += -fPIC - VARIANT = -linux - SO = so -else ifeq ($(OS),Windows_NT) - CFLAGS += -fno-asynchronous-unwind-tables - CC = $(MARCH)-w64-mingw32-gcc - AR = $(MARCH)-w64-mingw32-ar - VARIANT = -mingw - SO = dll - LDFLAGS = -Wl,--out-implib,libevercrypt.dll.a -else ifeq ($(UNAME),FreeBSD) - CFLAGS += -fPIC - VARIANT = -linux - SO = so -endif - -OBJS += $(patsubst %.S,%.o,$(wildcard *-$(MARCH)$(VARIANT).S)) - -include Makefile.basic - -CFLAGS += -Wno-parentheses -Wno-deprecated-declarations -Wno-\#warnings -Wno-error=cpp -Wno-cpp -g -std=gnu11 -O3 - -Hacl_Poly1305_128.o Hacl_Streaming_Poly1305_128.o Hacl_Chacha20_Vec128.o Hacl_Chacha20Poly1305_128.o Hacl_Hash_Blake2s_128.o Hacl_HMAC_Blake2s_128.o Hacl_HKDF_Blake2s_128.o Hacl_Streaming_Blake2s_128.o Hacl_SHA2_Vec128.o: CFLAGS += $(CFLAGS_128) -Hacl_Poly1305_256.o Hacl_Streaming_Poly1305_256.o Hacl_Chacha20_Vec256.o Hacl_Chacha20Poly1305_256.o Hacl_Hash_Blake2b_256.o Hacl_HMAC_Blake2b_256.o Hacl_HKDF_Blake2b_256.o Hacl_Streaming_Blake2b_256.o Hacl_SHA2_Vec256.o: CFLAGS += $(CFLAGS_256) - -all: libevercrypt.$(SO) - -# This one and the one below are for people who run "make" without running -# configure. It's not perfect but perhaps a tiny bit better than nothing. -Makefile.config: - ./configure - -# If the configure script has not run, create an empty config.h -config.h: - touch $@ - -libevercrypt.$(SO): config.h $(OBJS) - $(CC) $(CFLAGS) -shared -o $@ $(filter-out %.h,$^) $(LDFLAGS) - -# 4. Compilation of OCaml bindings; conditional on the presence of the lib_gen -# folder, possibly disabled by configure. - -ifeq (,$(DISABLE_OCAML_BINDINGS)) -ifneq (,$(wildcard lib_gen)) - -.PRECIOUS: %.cmx - -OCAMLOPT=ocamlfind opt -package ctypes,ctypes.stubs -linkpkg -I lib -OCAMLDEP=ocamlfind dep -I lib -slash - -OCAMLC=ocamlfind c -g -package ctypes,ctypes.stubs -linkpkg -I lib - -CFLAGS += -I "$(shell ocamlfind query ctypes)" -I "$(shell ocamlfind c -where)" - -# Don't include bindings for files that cannot be compiled. -BLACKLIST_ML=$(patsubst %.c,%,$(BLACKLIST)) -ALL_OCAML=$(filter-out $(BLACKLIST_ML),$(patsubst lib_gen/%_gen.ml,%,$(wildcard lib_gen/*_gen.ml))) - -# Just names. -ALL_BINDINGS=$(patsubst %,lib/%_bindings.cmx,$(ALL_OCAML)) -ALL_GENERATORS=$(patsubst %,lib_gen/%_gen.exe, $(ALL_OCAML)) -ALL_ML_STUBS=$(patsubst %,lib/%_stubs.cmx,$(ALL_OCAML)) -ALL_C_STUBS=$(patsubst %,lib/%_c_stubs.o,$(ALL_OCAML)) - -include .depend.ocaml -include ctypes.depend - -lib_gen/Lib_RandomBuffer_System_gen.cmx: lib/Lib_RandomBuffer_System_bindings.cmx -lib_gen/Lib_RandomBuffer_System_gen.exe: lib/Lib_RandomBuffer_System_bindings.cmx lib_gen/Lib_RandomBuffer_System_gen.cmx - -.depend.ocaml: - $(OCAMLDEP) $(wildcard lib/*.ml) $(wildcard lib_gen/*.ml) > $@ - -# Note: for some reason, this minimal Makefile doesn't apply the shortest stem -# rule. -# -# %.exe: -# echo long stem -# -# %_foo.exe: %.b -# cat $< -# -# %.b: -# echo $@ > $@ -# -# Which really puzzles me (e.g. make foo_foo.exe always echoes "long stem"), -# even though the shortest stem rule should apply. 
However, we can cancel a -# previous pattern rule, thanks to -# https://www.gnu.org/software/make/manual/html_node/Canceling-Rules.html -%.exe: - -all: ocamlevercrypt.cmxa - -lib_gen/%_gen.exe: libevercrypt.a - $(OCAMLOPT) $(filter-out %.a,$^) libevercrypt.a -o $@ - -%.cmx: %.ml - $(OCAMLOPT) -c $^ -o $@ - -%.cmo: %.ml - $(OCAMLC) -c $^ -o $@ - - -.PRECIOUS: lib/%_stubs.ml -lib/%_stubs.ml: lib/%_c_stubs.c - -ifeq ($(shell uname -s),Darwin) - DY=DYLD_LIBRARY_ -else ifeq ($(OS),Windows_NT) - DY= -else - DY=LD_LIBRARY_ -endif - -lib/%_stubs.ml lib/%_c_stubs.c: lib_gen/%_gen.exe - $< - -# Compiling these files raises the pointer sign warning because the Ctypes declaration for bytes expects a char*, not a uint8_t* -# Can be remove once ctypes is upgraded to 0.17 -lib/%_c_stubs.o: CFLAGS += -Wno-pointer-sign - -BLACKLIST_CMX = $(patsubst %,lib/%_stubs.cmx,$(BLACKLIST_ML)) -BLACKLIST_CMX += $(patsubst %,lib/%_bindings.cmx,$(BLACKLIST_ML)) -CTYPES_CMX = $(filter-out $(BLACKLIST_CMX),$(CTYPES_DEPS)) -CTYPES_CMX += lib/Lib_RandomBuffer_System_stubs.cmx lib/Lib_RandomBuffer_System_bindings.cmx -CTYPES_ML = $(patsubst %.cmx,%.ml,$(CTYPES_CMX)) -CTYPES_CMI = $(patsubst %.cmx,%.cmi,$(CTYPES_CMX)) -CTYPES_CMO = $(patsubst %.cmx,%.cmo,$(CTYPES_CMX)) - - -ocamlevercrypt.cma: $(ALL_BINDINGS) $(CTYPES_CMO) $(ALL_C_STUBS) $(CTYPES_CMX) libevercrypt.$(SO) - ocamlmklib -o ocamlevercrypt $(CTYPES_CMO) -L. -levercrypt - -ocamlevercrypt.cmxa: $(ALL_BINDINGS) $(ALL_ML_STUBS) $(ALL_C_STUBS) libevercrypt.$(SO) - ocamlmklib -o ocamlevercrypt $(CTYPES_CMX) -L. -levercrypt - -STUBLIBS_PATH=$(OPAM_SWITCH_PREFIX)/lib/stublibs - -dllocamlevercrypt.$(SO): ocamlevercrypt.cmxa ocamlevercrypt.cma - ocamlmklib -o ocamlevercrypt $(ALL_C_STUBS) -L. -L$(STUBLIBS_PATH) -levercrypt - -install-hacl-star-raw: dllocamlevercrypt.$(SO) - ocamlfind remove hacl-star-raw || true - ocamlfind install hacl-star-raw META - ocamlfind install -add hacl-star-raw $(CTYPES_ML) - ocamlfind install -add hacl-star-raw $(CTYPES_CMX) - ocamlfind install -add hacl-star-raw $(CTYPES_CMO) - ocamlfind install -add hacl-star-raw $(CTYPES_CMI) - ocamlfind install -add hacl-star-raw \ - libevercrypt.a libevercrypt.$(SO) ocamlevercrypt.cma ocamlevercrypt.cmxa ocamlevercrypt.a \ - libocamlevercrypt.a dllocamlevercrypt.$(SO) config.h - -.PHONY: install-ocaml -install-ocaml: install-hacl-star-raw - cd ../../bindings/ocaml && dune build && dune install - -endif -endif diff --git a/dist/c89-compatible/Makefile.basic b/dist/c89-compatible/Makefile.basic deleted file mode 100644 index d7a1fdfd70..0000000000 --- a/dist/c89-compatible/Makefile.basic +++ /dev/null @@ -1,56 +0,0 @@ -# A basic Makefile that KaRaMeL copies in the output directory; this is not -# guaranteed to work and will only work well for very simple projects. This -# Makefile uses: -# - the custom C files passed to your krml invocation -# - the custom C flags passed to your krml invocation -# - the -o option passed to your krml invocation - -include Makefile.include - -ifeq (,$(KRML_HOME)) - $(error please define KRML_HOME to point to the root of your KaRaMeL git checkout) -endif - -CFLAGS += -I. 
-I $(KRML_HOME)/include -I $(KRML_HOME)/krmllib/dist/minimal -CFLAGS += -Wall -Wextra -Werror -std=c11 -Wno-unused-variable \ - -Wno-unknown-warning-option -Wno-unused-but-set-variable -Wno-unused-function \ - -Wno-unused-parameter -Wno-infinite-recursion \ - -g -fwrapv -D_BSD_SOURCE -D_DEFAULT_SOURCE -ifeq ($(OS),Windows_NT) -CFLAGS += -D__USE_MINGW_ANSI_STDIO -else -CFLAGS += -fPIC -endif -CFLAGS += $(USER_CFLAGS) - -SOURCES += $(ALL_C_FILES) $(USER_C_FILES) -ifneq (,$(BLACKLIST)) - SOURCES := $(filter-out $(BLACKLIST),$(SOURCES)) -endif -OBJS += $(patsubst %.c,%.o,$(SOURCES)) - -all: $(USER_TARGET) - -$(USER_TARGET): $(OBJS) - -AR ?= ar - -%.a: - $(AR) cr $@ $^ - -%.exe: - $(CC) $(CFLAGS) -o $@ $^ $(KRML_HOME)/krmllib/dist/generic/libkrmllib.a - -%.so: - $(CC) $(CFLAGS) -shared -o $@ $^ - -%.d: %.c - @set -e; rm -f $@; \ - $(CC) -MM -MG $(CFLAGS) $< > $@.$$$$; \ - sed 's,\($(notdir $*)\)\.o[ :]*,$(dir $@)\1.o $@ : ,g' < $@.$$$$ > $@; \ - rm -f $@.$$$$ - -include $(patsubst %.c,%.d,$(SOURCES)) - -clean: - rm -rf *.o *.d $(USER_TARGET) diff --git a/dist/c89-compatible/Makefile.include b/dist/c89-compatible/Makefile.include deleted file mode 100644 index ed0ccab6a1..0000000000 --- a/dist/c89-compatible/Makefile.include +++ /dev/null @@ -1,5 +0,0 @@ -USER_TARGET=libevercrypt.a -USER_CFLAGS=-std=c89 -Wno-typedef-redefinition -Wno-unused -USER_C_FILES=Lib_Memzero0.c Lib_PrintBuffer.c Lib_RandomBuffer_System.c -ALL_C_FILES=EverCrypt_AEAD.c EverCrypt_AutoConfig2.c EverCrypt_Chacha20Poly1305.c EverCrypt_Cipher.c EverCrypt_Curve25519.c EverCrypt_DRBG.c EverCrypt_Ed25519.c EverCrypt_HKDF.c EverCrypt_HMAC.c EverCrypt_Hash.c EverCrypt_Poly1305.c Hacl_Bignum.c Hacl_Bignum256.c Hacl_Bignum256_32.c Hacl_Bignum32.c Hacl_Bignum4096.c Hacl_Bignum4096_32.c Hacl_Bignum64.c Hacl_Chacha20.c Hacl_Chacha20Poly1305_128.c Hacl_Chacha20Poly1305_256.c Hacl_Chacha20Poly1305_32.c Hacl_Chacha20_Vec128.c Hacl_Chacha20_Vec256.c Hacl_Chacha20_Vec32.c Hacl_Curve25519_51.c Hacl_Curve25519_64.c Hacl_Curve25519_64_Slow.c Hacl_EC_Ed25519.c Hacl_EC_K256.c Hacl_Ed25519.c Hacl_FFDHE.c Hacl_Frodo1344.c Hacl_Frodo64.c Hacl_Frodo640.c Hacl_Frodo976.c Hacl_Frodo_KEM.c Hacl_GenericField32.c Hacl_GenericField64.c Hacl_HKDF.c Hacl_HKDF_Blake2b_256.c Hacl_HKDF_Blake2s_128.c Hacl_HMAC.c Hacl_HMAC_Blake2b_256.c Hacl_HMAC_Blake2s_128.c Hacl_HMAC_DRBG.c Hacl_HPKE_Curve51_CP128_SHA256.c Hacl_HPKE_Curve51_CP128_SHA512.c Hacl_HPKE_Curve51_CP256_SHA256.c Hacl_HPKE_Curve51_CP256_SHA512.c Hacl_HPKE_Curve51_CP32_SHA256.c Hacl_HPKE_Curve51_CP32_SHA512.c Hacl_HPKE_Curve64_CP128_SHA256.c Hacl_HPKE_Curve64_CP128_SHA512.c Hacl_HPKE_Curve64_CP256_SHA256.c Hacl_HPKE_Curve64_CP256_SHA512.c Hacl_HPKE_Curve64_CP32_SHA256.c Hacl_HPKE_Curve64_CP32_SHA512.c Hacl_HPKE_P256_CP128_SHA256.c Hacl_HPKE_P256_CP256_SHA256.c Hacl_HPKE_P256_CP32_SHA256.c Hacl_Hash_Base.c Hacl_Hash_Blake2.c Hacl_Hash_Blake2b_256.c Hacl_Hash_Blake2s_128.c Hacl_Hash_MD5.c Hacl_Hash_SHA1.c Hacl_Hash_SHA2.c Hacl_K256_ECDSA.c Hacl_NaCl.c Hacl_P256.c Hacl_Poly1305_128.c Hacl_Poly1305_256.c Hacl_Poly1305_32.c Hacl_RSAPSS.c Hacl_SHA2_Scalar32.c Hacl_SHA2_Vec128.c Hacl_SHA2_Vec256.c Hacl_SHA3.c Hacl_Salsa20.c Hacl_Streaming_Blake2.c Hacl_Streaming_Blake2b_256.c Hacl_Streaming_Blake2s_128.c Hacl_Streaming_MD5.c Hacl_Streaming_Poly1305_128.c Hacl_Streaming_Poly1305_256.c Hacl_Streaming_Poly1305_32.c Hacl_Streaming_SHA1.c Hacl_Streaming_SHA2.c Hacl_Streaming_SHA3.c Vale.c -ALL_H_FILES=EverCrypt_AEAD.h EverCrypt_AutoConfig2.h EverCrypt_Chacha20Poly1305.h EverCrypt_Cipher.h EverCrypt_Curve25519.h 
EverCrypt_DRBG.h EverCrypt_Ed25519.h EverCrypt_Error.h EverCrypt_HKDF.h EverCrypt_HMAC.h EverCrypt_Hash.h EverCrypt_Poly1305.h Hacl_AES128.h Hacl_Bignum.h Hacl_Bignum25519_51.h Hacl_Bignum256.h Hacl_Bignum256_32.h Hacl_Bignum32.h Hacl_Bignum4096.h Hacl_Bignum4096_32.h Hacl_Bignum64.h Hacl_Bignum_Base.h Hacl_Bignum_K256.h Hacl_Chacha20.h Hacl_Chacha20Poly1305_128.h Hacl_Chacha20Poly1305_256.h Hacl_Chacha20Poly1305_32.h Hacl_Chacha20_Vec128.h Hacl_Chacha20_Vec256.h Hacl_Chacha20_Vec32.h Hacl_Curve25519_51.h Hacl_Curve25519_64.h Hacl_Curve25519_64_Slow.h Hacl_EC_Ed25519.h Hacl_EC_K256.h Hacl_Ed25519.h Hacl_FFDHE.h Hacl_Frodo1344.h Hacl_Frodo64.h Hacl_Frodo640.h Hacl_Frodo976.h Hacl_Frodo_KEM.h Hacl_GenericField32.h Hacl_GenericField64.h Hacl_HKDF.h Hacl_HKDF_Blake2b_256.h Hacl_HKDF_Blake2s_128.h Hacl_HMAC.h Hacl_HMAC_Blake2b_256.h Hacl_HMAC_Blake2s_128.h Hacl_HMAC_DRBG.h Hacl_HPKE_Curve51_CP128_SHA256.h Hacl_HPKE_Curve51_CP128_SHA512.h Hacl_HPKE_Curve51_CP256_SHA256.h Hacl_HPKE_Curve51_CP256_SHA512.h Hacl_HPKE_Curve51_CP32_SHA256.h Hacl_HPKE_Curve51_CP32_SHA512.h Hacl_HPKE_Curve64_CP128_SHA256.h Hacl_HPKE_Curve64_CP128_SHA512.h Hacl_HPKE_Curve64_CP256_SHA256.h Hacl_HPKE_Curve64_CP256_SHA512.h Hacl_HPKE_Curve64_CP32_SHA256.h Hacl_HPKE_Curve64_CP32_SHA512.h Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h Hacl_HPKE_P256_CP128_SHA256.h Hacl_HPKE_P256_CP256_SHA256.h Hacl_HPKE_P256_CP32_SHA256.h Hacl_Hash_Base.h Hacl_Hash_Blake2.h Hacl_Hash_Blake2b_256.h Hacl_Hash_Blake2s_128.h Hacl_Hash_MD5.h Hacl_Hash_SHA1.h Hacl_Hash_SHA2.h Hacl_Impl_Blake2_Constants.h Hacl_Impl_FFDHE_Constants.h Hacl_IntTypes_Intrinsics.h Hacl_IntTypes_Intrinsics_128.h Hacl_K256_ECDSA.h Hacl_Krmllib.h Hacl_NaCl.h Hacl_P256.h Hacl_Poly1305_128.h Hacl_Poly1305_256.h Hacl_Poly1305_32.h Hacl_RSAPSS.h Hacl_SHA2_Generic.h Hacl_SHA2_Scalar32.h Hacl_SHA2_Types.h Hacl_SHA2_Vec128.h Hacl_SHA2_Vec256.h Hacl_SHA3.h Hacl_Salsa20.h Hacl_Spec.h Hacl_Streaming_Blake2.h Hacl_Streaming_Blake2b_256.h Hacl_Streaming_Blake2s_128.h Hacl_Streaming_MD5.h Hacl_Streaming_Poly1305_128.h Hacl_Streaming_Poly1305_256.h Hacl_Streaming_Poly1305_32.h Hacl_Streaming_SHA1.h Hacl_Streaming_SHA2.h Hacl_Streaming_SHA3.h Lib_Memzero0.h Lib_PrintBuffer.h Lib_RandomBuffer_System.h TestLib.h internal/Hacl_Bignum.h internal/Hacl_Chacha20.h internal/Hacl_Curve25519_51.h internal/Hacl_Ed25519.h internal/Hacl_Frodo_KEM.h internal/Hacl_HMAC.h internal/Hacl_Hash_Blake2.h internal/Hacl_Hash_Blake2b_256.h internal/Hacl_Hash_Blake2s_128.h internal/Hacl_Hash_MD5.h internal/Hacl_Hash_SHA1.h internal/Hacl_Hash_SHA2.h internal/Hacl_K256_ECDSA.h internal/Hacl_P256.h internal/Hacl_Poly1305_128.h internal/Hacl_Poly1305_256.h internal/Hacl_SHA2_Types.h internal/Hacl_Spec.h internal/Vale.h diff --git a/dist/c89-compatible/TestLib.h b/dist/c89-compatible/TestLib.h deleted file mode 100644 index ac036822d3..0000000000 --- a/dist/c89-compatible/TestLib.h +++ /dev/null @@ -1,90 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice 
shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __TestLib_H -#define __TestLib_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -extern void TestLib_touch(int32_t uu___); - -extern void TestLib_check(bool uu___); - -extern void TestLib_check8(int8_t uu___, int8_t uu___1); - -extern void TestLib_check16(int16_t uu___, int16_t uu___1); - -extern void TestLib_check32(int32_t uu___, int32_t uu___1); - -extern void TestLib_check64(int64_t uu___, int64_t uu___1); - -extern void TestLib_checku8(uint8_t uu___, uint8_t uu___1); - -extern void TestLib_checku16(uint16_t uu___, uint16_t uu___1); - -extern void TestLib_checku32(uint32_t uu___, uint32_t uu___1); - -extern void TestLib_checku64(uint64_t uu___, uint64_t uu___1); - -extern void TestLib_compare_and_print(C_String_t uu___, uint8_t *b1, uint8_t *b2, uint32_t l); - -extern uint8_t *TestLib_unsafe_malloc(uint32_t l); - -extern void TestLib_perr(uint32_t uu___); - -extern void TestLib_print_clock_diff(clock_t uu___, clock_t uu___1); - -KRML_DEPRECATED("p_null from TestLib; use LowStar.Buffer.null instead") - -extern uint8_t *TestLib_uint8_p_null; - -KRML_DEPRECATED("p_null from TestLib; use LowStar.Buffer.null instead") - -extern uint32_t *TestLib_uint32_p_null; - -KRML_DEPRECATED("p_null from TestLib; use LowStar.Buffer.null instead") - -extern uint64_t *TestLib_uint64_p_null; - -extern TestLib_cycles TestLib_cpucycles(); - -extern void -TestLib_print_cycles_per_round(TestLib_cycles uu___, TestLib_cycles uu___1, uint32_t uu___2); - -#if defined(__cplusplus) -} -#endif - -#define __TestLib_H_DEFINED -#endif diff --git a/dist/c89-compatible/Vale.c b/dist/c89-compatible/Vale.c deleted file mode 100644 index 8b95592e65..0000000000 --- a/dist/c89-compatible/Vale.c +++ /dev/null @@ -1,30 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "internal/Vale.h"
-
-
-
-typedef uint64_t als_ret;
-
diff --git a/dist/c89-compatible/aesgcm-x86_64-darwin.S b/dist/c89-compatible/aesgcm-x86_64-darwin.S
deleted file mode 100644
index e0c8c9f65f..0000000000
--- a/dist/c89-compatible/aesgcm-x86_64-darwin.S
+++ /dev/null
@@ -1,8101 +0,0 @@
-.text
-.global _aes128_key_expansion
-_aes128_key_expansion:
- movdqu 0(%rdi), %xmm1
- mov %rsi, %rdx
- movdqu %xmm1, 0(%rdx)
- aeskeygenassist $1, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 16(%rdx)
- aeskeygenassist $2, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 32(%rdx)
- aeskeygenassist $4, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 48(%rdx)
- aeskeygenassist $8, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 64(%rdx)
- aeskeygenassist $16, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 80(%rdx)
- aeskeygenassist $32, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 96(%rdx)
- aeskeygenassist $64, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 112(%rdx)
- aeskeygenassist $128, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 128(%rdx)
- aeskeygenassist $27, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 144(%rdx)
- aeskeygenassist $54, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 160(%rdx)
- pxor %xmm1, %xmm1
- pxor %xmm2, %xmm2
- pxor %xmm3, %xmm3
- ret
-
-.global _aes128_keyhash_init
-_aes128_keyhash_init:
- mov $579005069656919567, %r8
- pinsrq $0, %r8, %xmm4
- mov $283686952306183, %r8
- pinsrq $1, %r8, %xmm4
- pxor %xmm0, %xmm0
- movdqu %xmm0, 80(%rsi)
- mov %rdi, %r8
- movdqu 0(%r8), %xmm2
- pxor %xmm2, %xmm0
- movdqu
16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm4, %xmm0 - mov %rsi, %rcx - movdqu %xmm0, 32(%rcx) - movdqu %xmm6, %xmm0 - mov %r12, %rax - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 0(%rcx) - movdqu %xmm6, %xmm1 - movdqu %xmm6, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - 
pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 16(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 48(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, 
%xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 64(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - 
mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 96(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd 
$14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 112(%rcx) - movdqu %xmm0, %xmm6 - mov %rax, %r12 - ret - -.global _aes256_key_expansion -_aes256_key_expansion: - movdqu 0(%rdi), %xmm1 - movdqu 16(%rdi), %xmm3 - mov %rsi, %rdx - movdqu %xmm1, 0(%rdx) - movdqu %xmm3, 16(%rdx) - aeskeygenassist $1, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 32(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 48(%rdx) - aeskeygenassist $2, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 64(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 80(%rdx) - aeskeygenassist $4, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 96(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 112(%rdx) - aeskeygenassist $8, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 128(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 144(%rdx) - aeskeygenassist $16, %xmm3, %xmm2 - 
pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 160(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 176(%rdx) - aeskeygenassist $32, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 192(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 208(%rdx) - aeskeygenassist $64, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 224(%rdx) - pxor %xmm1, %xmm1 - pxor %xmm2, %xmm2 - pxor %xmm3, %xmm3 - pxor %xmm4, %xmm4 - ret - -.global _aes256_keyhash_init -_aes256_keyhash_init: - mov $579005069656919567, %r8 - pinsrq $0, %r8, %xmm4 - mov $283686952306183, %r8 - pinsrq $1, %r8, %xmm4 - pxor %xmm0, %xmm0 - movdqu %xmm0, 80(%rsi) - mov %rdi, %r8 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm4, %xmm0 - mov %rsi, %rcx - movdqu %xmm0, 32(%rcx) - movdqu %xmm6, %xmm0 - mov %r12, %rax - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 0(%rcx) - movdqu %xmm6, %xmm1 - movdqu %xmm6, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, 
%xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 16(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - 
movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 48(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 
- pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 64(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq 
$4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 96(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 112(%rcx) - movdqu %xmm0, %xmm6 - mov %rax, %r12 - ret - -.global _gctr128_bytes -_gctr128_bytes: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - movdqu 0(%r9), %xmm7 - mov %rdi, %rax - mov %rdx, %rbx - mov %rcx, %r13 - mov 72(%rsp), %rcx - mov %rcx, %rbp - imul $16, %rbp - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm8 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm8 - mov %rcx, %rdx - shr $2, %rdx - and $3, %rcx - cmp $0, %rdx - 
jbe L0 - mov %rax, %r9 - mov %rbx, %r10 - pshufb %xmm8, %xmm7 - movdqu %xmm7, %xmm9 - mov $579005069656919567, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pshufb %xmm0, %xmm9 - movdqu %xmm9, %xmm10 - pxor %xmm3, %xmm3 - mov $1, %rax - pinsrd $2, %eax, %xmm3 - paddd %xmm3, %xmm9 - mov $3, %rax - pinsrd $2, %eax, %xmm3 - mov $2, %rax - pinsrd $0, %eax, %xmm3 - paddd %xmm3, %xmm10 - pshufb %xmm8, %xmm9 - pshufb %xmm8, %xmm10 - pextrq $0, %xmm7, %rdi - mov $283686952306183, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pxor %xmm15, %xmm15 - mov $4, %rax - pinsrd $0, %eax, %xmm15 - mov $4, %rax - pinsrd $2, %eax, %xmm15 - jmp L3 -.balign 16 -L2: - pinsrq $0, %rdi, %xmm2 - pinsrq $0, %rdi, %xmm12 - pinsrq $0, %rdi, %xmm13 - pinsrq $0, %rdi, %xmm14 - shufpd $2, %xmm9, %xmm2 - shufpd $0, %xmm9, %xmm12 - shufpd $2, %xmm10, %xmm13 - shufpd $0, %xmm10, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - movdqu 0(%r8), %xmm3 - movdqu 16(%r8), %xmm4 - movdqu 32(%r8), %xmm5 - movdqu 48(%r8), %xmm6 - paddd %xmm15, %xmm9 - paddd %xmm15, %xmm10 - pxor %xmm3, %xmm2 - pxor %xmm3, %xmm12 - pxor %xmm3, %xmm13 - pxor %xmm3, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 64(%r8), %xmm3 - movdqu 80(%r8), %xmm4 - movdqu 96(%r8), %xmm5 - movdqu 112(%r8), %xmm6 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 128(%r8), %xmm3 - movdqu 144(%r8), %xmm4 - movdqu 160(%r8), %xmm5 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenclast %xmm5, %xmm2 - aesenclast %xmm5, %xmm12 - aesenclast %xmm5, %xmm13 - aesenclast %xmm5, %xmm14 - movdqu 0(%r9), %xmm7 - pxor %xmm7, %xmm2 - movdqu 16(%r9), %xmm7 - pxor %xmm7, %xmm12 - movdqu 32(%r9), %xmm7 - pxor %xmm7, %xmm13 - movdqu 48(%r9), %xmm7 - pxor %xmm7, %xmm14 - movdqu %xmm2, 0(%r10) - movdqu %xmm12, 16(%r10) - movdqu %xmm13, 32(%r10) - movdqu %xmm14, 48(%r10) - sub $1, %rdx - add $64, %r9 - add $64, %r10 -.balign 16 -L3: - cmp $0, %rdx - ja L2 - movdqu %xmm9, %xmm7 - pinsrq $0, %rdi, %xmm7 - pshufb %xmm8, %xmm7 - mov %r9, %rax - mov %r10, %rbx - jmp L1 -L0: -L1: - mov $0, %rdx - mov %rax, %r9 - mov %rbx, %r10 - pxor %xmm4, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - jmp L5 -.balign 16 -L4: - movdqu %xmm7, %xmm0 - pshufb %xmm8, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, 
%xmm2 - movdqu 0(%r9), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rdx - add $16, %r9 - add $16, %r10 - paddd %xmm4, %xmm7 -.balign 16 -L5: - cmp %rcx, %rdx - jne L4 - cmp %rbp, %rsi - jbe L6 - movdqu 0(%r13), %xmm1 - movdqu %xmm7, %xmm0 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm2 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm2 - pshufb %xmm2, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm1 - movdqu %xmm1, 0(%r13) - jmp L7 -L6: -L7: - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global _gctr256_bytes -_gctr256_bytes: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - movdqu 0(%r9), %xmm7 - mov %rdi, %rax - mov %rdx, %rbx - mov %rcx, %r13 - mov 72(%rsp), %rcx - mov %rcx, %rbp - imul $16, %rbp - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm8 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm8 - mov %rcx, %rdx - shr $2, %rdx - and $3, %rcx - cmp $0, %rdx - jbe L8 - mov %rax, %r9 - mov %rbx, %r10 - pshufb %xmm8, %xmm7 - movdqu %xmm7, %xmm9 - mov $579005069656919567, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pshufb %xmm0, %xmm9 - movdqu %xmm9, %xmm10 - pxor %xmm3, %xmm3 - mov $1, %rax - pinsrd $2, %eax, %xmm3 - paddd %xmm3, %xmm9 - mov $3, %rax - pinsrd $2, %eax, %xmm3 - mov $2, %rax - pinsrd $0, %eax, %xmm3 - paddd %xmm3, %xmm10 - pshufb %xmm8, %xmm9 - pshufb %xmm8, %xmm10 - pextrq $0, %xmm7, %rdi - mov $283686952306183, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pxor %xmm15, %xmm15 - mov $4, %rax - pinsrd $0, %eax, %xmm15 - mov $4, %rax - pinsrd $2, %eax, %xmm15 - jmp L11 -.balign 16 -L10: - pinsrq $0, %rdi, %xmm2 - pinsrq $0, %rdi, %xmm12 - pinsrq $0, %rdi, %xmm13 - pinsrq $0, %rdi, %xmm14 - shufpd $2, %xmm9, %xmm2 - shufpd $0, %xmm9, %xmm12 - shufpd $2, %xmm10, %xmm13 - shufpd $0, %xmm10, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - movdqu 0(%r8), %xmm3 - movdqu 16(%r8), %xmm4 - movdqu 32(%r8), %xmm5 - movdqu 48(%r8), %xmm6 - paddd %xmm15, %xmm9 - paddd %xmm15, %xmm10 - pxor %xmm3, %xmm2 - pxor %xmm3, %xmm12 - pxor %xmm3, %xmm13 - pxor %xmm3, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 64(%r8), %xmm3 - movdqu 80(%r8), %xmm4 - movdqu 96(%r8), %xmm5 - movdqu 112(%r8), %xmm6 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 128(%r8), %xmm3 - movdqu 
144(%r8), %xmm4 - movdqu 160(%r8), %xmm5 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - movdqu %xmm5, %xmm3 - movdqu 176(%r8), %xmm4 - movdqu 192(%r8), %xmm5 - movdqu 208(%r8), %xmm6 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 224(%r8), %xmm5 - aesenclast %xmm5, %xmm2 - aesenclast %xmm5, %xmm12 - aesenclast %xmm5, %xmm13 - aesenclast %xmm5, %xmm14 - movdqu 0(%r9), %xmm7 - pxor %xmm7, %xmm2 - movdqu 16(%r9), %xmm7 - pxor %xmm7, %xmm12 - movdqu 32(%r9), %xmm7 - pxor %xmm7, %xmm13 - movdqu 48(%r9), %xmm7 - pxor %xmm7, %xmm14 - movdqu %xmm2, 0(%r10) - movdqu %xmm12, 16(%r10) - movdqu %xmm13, 32(%r10) - movdqu %xmm14, 48(%r10) - sub $1, %rdx - add $64, %r9 - add $64, %r10 -.balign 16 -L11: - cmp $0, %rdx - ja L10 - movdqu %xmm9, %xmm7 - pinsrq $0, %rdi, %xmm7 - pshufb %xmm8, %xmm7 - mov %r9, %rax - mov %r10, %rbx - jmp L9 -L8: -L9: - mov $0, %rdx - mov %rax, %r9 - mov %rbx, %r10 - pxor %xmm4, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - jmp L13 -.balign 16 -L12: - movdqu %xmm7, %xmm0 - pshufb %xmm8, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r9), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rdx - add $16, %r9 - add $16, %r10 - paddd %xmm4, %xmm7 -.balign 16 -L13: - cmp %rcx, %rdx - jne L12 - cmp %rbp, %rsi - jbe L14 - movdqu 0(%r13), %xmm1 - movdqu %xmm7, %xmm0 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm2 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm2 - pshufb %xmm2, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm1 - movdqu %xmm1, 0(%r13) - jmp L15 -L14: -L15: - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global _compute_iv_stdcall -_compute_iv_stdcall: - cmp $12, %rsi - jne L16 - cmp 
$12, %rsi - jne L18 - movdqu 0(%r8), %xmm0 - mov $579005069656919567, %rax - pinsrq $0, %rax, %xmm1 - mov $283686952306183, %rax - pinsrq $1, %rax, %xmm1 - pshufb %xmm1, %xmm0 - mov $1, %rax - pinsrd $0, %eax, %xmm0 - movdqu %xmm0, 0(%rcx) - jmp L19 -L18: - mov %rcx, %rax - add $32, %r9 - mov %r8, %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L21 -.balign 16 -L20: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L21: - cmp $6, %rdx - jae L20 - cmp $0, %rdx - jbe L22 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L24 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - 
[Elided here: the remainder of the deleted machine-generated x86-64 assembly (AT&T syntax, Vale-style numbered labels and `.balign 16` alignment). The span carries the tail of the GHASH-based IV/counter-block setup routine (masking of a partial final block, folding in of the 64-bit length block, final polynomial reduction), the complete body of `_gcm128_encrypt_opt`, and the opening of `_gcm256_encrypt_opt`. Both encrypt routines follow the same generated pattern: hash the AAD six 16-byte blocks per iteration with `vpclmulqdq`-based GHASH (Karatsuba-style accumulation across xmm4/xmm6/xmm7 and reduction by the GCM constant 0xC200000000000000), then run six-way interleaved AES-CTR (`vaesenc`/`vaesenclast` over the expanded key schedule) fused with GHASH over the produced ciphertext, and finally fold in the AAD and ciphertext bit lengths and encrypt the initial counter block to form the authentication tag.]
vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 80(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 96(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - add $96, %r14 - cmp $0, %rdx - jbe L126 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L127 -L126: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L127: -.balign 16 -L123: - cmp $0, %rdx - ja L122 - movdqu 32(%rbp), %xmm7 - movdqu %xmm1, 32(%rbp) - pxor %xmm4, %xmm4 - movdqu %xmm4, 16(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - 
vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm0 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm0 - movdqu %xmm9, -96(%rsi) - vpshufb %xmm0, %xmm9, %xmm9 - vpxor %xmm7, %xmm1, %xmm1 - movdqu %xmm10, -80(%rsi) - vpshufb %xmm0, %xmm10, %xmm10 - movdqu %xmm11, -64(%rsi) - vpshufb %xmm0, %xmm11, %xmm11 - movdqu %xmm12, -48(%rsi) - vpshufb %xmm0, %xmm12, %xmm12 - movdqu %xmm13, -32(%rsi) - vpshufb %xmm0, %xmm13, %xmm13 - movdqu %xmm14, -16(%rsi) - vpshufb %xmm0, %xmm14, %xmm14 - pxor %xmm4, %xmm4 - movdqu %xmm14, %xmm7 - movdqu %xmm4, 16(%rbp) - movdqu %xmm13, 48(%rbp) - movdqu %xmm12, 64(%rbp) - movdqu %xmm11, 80(%rbp) - movdqu %xmm10, 96(%rbp) - movdqu %xmm9, 112(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - 
vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - sub $128, %rcx -L117: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 104(%rsp), %rax - mov 112(%rsp), %rdi - mov 120(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L129 -.balign 16 -L128: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L129: - cmp %rdx, %rbx - jne L128 - mov %rdi, %r11 - jmp L131 -.balign 16 -L130: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, 
%r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L131: - cmp $6, %rdx - jae L130 - cmp $0, %rdx - jbe L132 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L134 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L135 -L134: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L136 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L138 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L140 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L141 -L140: -L141: - jmp L139 -L138: -L139: - jmp L137 -L136: -L137: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L135: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - 
vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L133 -L132: -L133: - add 96(%rsp), %r14 - imul $16, %r14 - mov 136(%rsp), %r13 - cmp %r14, %r13 - jbe L142 - mov 128(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%rax), %xmm4 - pxor %xmm4, %xmm0 - movdqu %xmm0, 0(%rax) - cmp $8, %r10 - jae L144 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L145 -L144: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L145: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L143 -L142: -L143: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 
- aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 152(%rsp), %r15 - movdqu %xmm8, 0(%r15) - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global _gcm128_decrypt_opt -_gcm128_decrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - mov 144(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 72(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L147 -.balign 16 -L146: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - 
vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L147: - cmp $6, %rdx - jae L146 - cmp $0, %rdx - jbe L148 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L150 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L151 -L150: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L152 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L154 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L156 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L157 -L156: -L157: - jmp L155 -L154: -L155: - jmp L153 -L152: -L153: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L151: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L149 -L148: -L149: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L158 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L160 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L161 -L160: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L161: - pshufb %xmm9, 
%xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L159 -L158: -L159: - mov 80(%rsp), %rdi - mov 88(%rsp), %rsi - mov 96(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L162 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L163 -L162: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rdi), %r14 - movdqu 32(%rbp), %xmm8 - movdqu 80(%rdi), %xmm7 - movdqu 64(%rdi), %xmm4 - movdqu 48(%rdi), %xmm5 - movdqu 32(%rdi), %xmm6 - vpshufb %xmm0, %xmm7, %xmm7 - movdqu 16(%rdi), %xmm2 - vpshufb %xmm0, %xmm4, %xmm4 - movdqu 0(%rdi), %xmm3 - vpshufb %xmm0, %xmm5, %xmm5 - movdqu %xmm4, 48(%rbp) - vpshufb %xmm0, %xmm6, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm2, %xmm2 - movdqu %xmm6, 80(%rbp) - vpshufb %xmm0, %xmm3, %xmm3 - movdqu %xmm2, 96(%rbp) - movdqu %xmm3, 112(%rbp) - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - cmp $6, %rdx - jne L164 - sub $96, %r14 - jmp L165 -L164: -L165: - jmp L167 -.balign 16 -L166: - add $6, %rbx - cmp $256, %rbx - jb L168 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L169 -L168: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L169: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, 
%xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - 
vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - cmp $6, %rdx - jbe L170 - add $96, %r14 - jmp L171 -L170: -L171: - cmp $0, %rdx - jbe L172 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L173 -L172: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L173: -.balign 16 -L167: - cmp $0, %rdx - ja L166 - movdqu %xmm1, 32(%rbp) - movdqu %xmm9, -96(%rsi) - movdqu %xmm10, -80(%rsi) - movdqu %xmm11, -64(%rsi) - movdqu %xmm12, -48(%rsi) - movdqu %xmm13, -32(%rsi) - movdqu %xmm14, -16(%rsi) - sub $128, %rcx -L163: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 104(%rsp), %rax - mov 112(%rsp), %rdi - mov 120(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - mov %rdi, %rbx - mov %rdx, %r12 - mov %rax, %rdi - mov %rdi, %r11 - jmp L175 -.balign 16 -L174: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, 
%xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L175: - cmp $6, %rdx - jae L174 - cmp $0, %rdx - jbe L176 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L178 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L179 -L178: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L180 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L182 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L184 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 
0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L185 -L184: -L185: - jmp L183 -L182: -L183: - jmp L181 -L180: -L181: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L179: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L177 -L176: -L177: - mov %rbx, %rdi - mov %r12, %rdx - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L187 -.balign 16 -L186: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L187: - cmp %rdx, %rbx - jne L186 - add 96(%rsp), %r14 - imul $16, %r14 - mov 136(%rsp), %r13 - cmp %r14, %r13 - jbe L188 - mov 128(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu 0(%rax), %xmm0 - movdqu %xmm0, %xmm10 - cmp $8, %r10 - jae L190 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L191 -L190: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L191: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - 
movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm10 - movdqu %xmm10, 0(%rax) - jmp L189 -L188: -L189: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 152(%rsp), %r15 - movdqu 0(%r15), %xmm0 - pcmpeqd %xmm8, %xmm0 - pextrq $0, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rax - adc $0, %rax - pextrq $1, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rdx - adc $0, %rdx - add %rdx, %rax - mov %rax, %rcx - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - mov %rcx, %rax - ret - -.global _gcm256_decrypt_opt -_gcm256_decrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - mov 144(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 72(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L193 -.balign 16 -L192: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, 
%xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L193: - cmp $6, %rdx - jae L192 - cmp $0, %rdx - jbe L194 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L196 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L197 -L196: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L198 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L200 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L202 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 
- vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L203 -L202: -L203: - jmp L201 -L200: -L201: - jmp L199 -L198: -L199: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L197: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L195 -L194: -L195: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L204 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L206 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L207 -L206: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L207: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L205 -L204: -L205: - mov 80(%rsp), %rdi - mov 88(%rsp), %rsi - mov 96(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L208 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L209 -L208: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rdi), %r14 - movdqu 32(%rbp), %xmm8 - movdqu 80(%rdi), %xmm7 - movdqu 64(%rdi), %xmm4 - movdqu 48(%rdi), %xmm5 - movdqu 32(%rdi), %xmm6 - vpshufb %xmm0, %xmm7, %xmm7 - movdqu 16(%rdi), %xmm2 - vpshufb %xmm0, %xmm4, %xmm4 - movdqu 0(%rdi), %xmm3 - vpshufb %xmm0, %xmm5, %xmm5 - movdqu %xmm4, 48(%rbp) - vpshufb %xmm0, %xmm6, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm2, %xmm2 - movdqu %xmm6, 80(%rbp) - vpshufb %xmm0, %xmm3, %xmm3 - movdqu %xmm2, 96(%rbp) - movdqu %xmm3, 112(%rbp) - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - cmp $6, %rdx - jne L210 - sub $96, %r14 - jmp L211 -L210: 
-L211: - jmp L213 -.balign 16 -L212: - add $6, %rbx - cmp $256, %rbx - jb L214 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L215 -L214: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L215: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, 
%xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 80(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 96(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast 
%xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - cmp $6, %rdx - jbe L216 - add $96, %r14 - jmp L217 -L216: -L217: - cmp $0, %rdx - jbe L218 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L219 -L218: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L219: -.balign 16 -L213: - cmp $0, %rdx - ja L212 - movdqu %xmm1, 32(%rbp) - movdqu %xmm9, -96(%rsi) - movdqu %xmm10, -80(%rsi) - movdqu %xmm11, -64(%rsi) - movdqu %xmm12, -48(%rsi) - movdqu %xmm13, -32(%rsi) - movdqu %xmm14, -16(%rsi) - sub $128, %rcx -L209: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 104(%rsp), %rax - mov 112(%rsp), %rdi - mov 120(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - mov %rdi, %rbx - mov %rdx, %r12 - mov %rax, %rdi - mov %rdi, %r11 - jmp L221 -.balign 16 -L220: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 
- vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L221: - cmp $6, %rdx - jae L220 - cmp $0, %rdx - jbe L222 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L224 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L225 -L224: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L226 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L228 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L230 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L231 -L230: -L231: - jmp L229 -L228: -L229: - jmp L227 -L226: -L227: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L225: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L223 -L222: -L223: - mov %rbx, %rdi - mov %r12, %rdx - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L233 -.balign 16 -L232: - movdqu %xmm11, %xmm0 - 
pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L233: - cmp %rdx, %rbx - jne L232 - add 96(%rsp), %r14 - imul $16, %r14 - mov 136(%rsp), %r13 - cmp %r14, %r13 - jbe L234 - mov 128(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu 0(%rax), %xmm0 - movdqu %xmm0, %xmm10 - cmp $8, %r10 - jae L236 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L237 -L236: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L237: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm10 - movdqu %xmm10, 0(%rax) - jmp L235 -L234: -L235: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - 
vpslldq $8, %xmm6, %xmm5
- vpxor %xmm5, %xmm4, %xmm4
- vpalignr $8, %xmm4, %xmm4, %xmm0
- vpclmulqdq $16, %xmm3, %xmm4, %xmm4
- vpsrldq $8, %xmm6, %xmm6
- vpxor %xmm6, %xmm7, %xmm7
- vpxor %xmm0, %xmm4, %xmm4
- vpalignr $8, %xmm4, %xmm4, %xmm8
- vpclmulqdq $16, %xmm3, %xmm4, %xmm4
- vpxor %xmm7, %xmm8, %xmm8
- vpxor %xmm4, %xmm8, %xmm8
- movdqu 0(%rbp), %xmm0
- pshufb %xmm9, %xmm0
- movdqu 0(%r8), %xmm2
- pxor %xmm2, %xmm0
- movdqu 16(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 32(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 48(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 64(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 80(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 96(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 112(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 128(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 144(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 160(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 176(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 192(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 208(%r8), %xmm2
- aesenc %xmm2, %xmm0
- movdqu 224(%r8), %xmm2
- aesenclast %xmm2, %xmm0
- pxor %xmm2, %xmm2
- pshufb %xmm9, %xmm8
- pxor %xmm0, %xmm8
- mov 152(%rsp), %r15
- movdqu 0(%r15), %xmm0
- pcmpeqd %xmm8, %xmm0
- pextrq $0, %xmm0, %rdx
- sub $18446744073709551615, %rdx
- mov $0, %rax
- adc $0, %rax
- pextrq $1, %xmm0, %rdx
- sub $18446744073709551615, %rdx
- mov $0, %rdx
- adc $0, %rdx
- add %rdx, %rax
- mov %rax, %rcx
- pop %rbx
- pop %rbp
- pop %rdi
- pop %rsi
- pop %r12
- pop %r13
- pop %r14
- pop %r15
- mov %rcx, %rax
- ret
-
-
diff --git a/dist/c89-compatible/aesgcm-x86_64-linux.S b/dist/c89-compatible/aesgcm-x86_64-linux.S
deleted file mode 100644
index 12dddc41a7..0000000000
--- a/dist/c89-compatible/aesgcm-x86_64-linux.S
+++ /dev/null
@@ -1,8101 +0,0 @@
-.text
-.global aes128_key_expansion
-aes128_key_expansion:
- movdqu 0(%rdi), %xmm1
- mov %rsi, %rdx
- movdqu %xmm1, 0(%rdx)
- aeskeygenassist $1, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 16(%rdx)
- aeskeygenassist $2, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 32(%rdx)
- aeskeygenassist $4, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 48(%rdx)
- aeskeygenassist $8, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 64(%rdx)
- aeskeygenassist $16, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 80(%rdx)
- aeskeygenassist $32, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- pxor %xmm2, %xmm1
- movdqu %xmm1, 96(%rdx)
- aeskeygenassist $64, %xmm1, %xmm2
- pshufd $255, %xmm2, %xmm2
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3
- pxor %xmm3, %xmm1
- vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 112(%rdx) - aeskeygenassist $128, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 128(%rdx) - aeskeygenassist $27, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 144(%rdx) - aeskeygenassist $54, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 160(%rdx) - pxor %xmm1, %xmm1 - pxor %xmm2, %xmm2 - pxor %xmm3, %xmm3 - ret - -.global aes128_keyhash_init -aes128_keyhash_init: - mov $579005069656919567, %r8 - pinsrq $0, %r8, %xmm4 - mov $283686952306183, %r8 - pinsrq $1, %r8, %xmm4 - pxor %xmm0, %xmm0 - movdqu %xmm0, 80(%rsi) - mov %rdi, %r8 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm4, %xmm0 - mov %rsi, %rcx - movdqu %xmm0, 32(%rcx) - movdqu %xmm6, %xmm0 - mov %r12, %rax - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 0(%rcx) - movdqu %xmm6, %xmm1 - movdqu %xmm6, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, 
%xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 16(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - 
vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 48(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 
- movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 64(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 96(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, 
%xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 112(%rcx) - movdqu %xmm0, %xmm6 - mov %rax, %r12 - ret - -.global aes256_key_expansion -aes256_key_expansion: - movdqu 0(%rdi), %xmm1 - movdqu 16(%rdi), %xmm3 - mov %rsi, %rdx - movdqu %xmm1, 0(%rdx) - movdqu %xmm3, 16(%rdx) - aeskeygenassist $1, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 32(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 48(%rdx) - aeskeygenassist $2, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 64(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, 
%xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 80(%rdx) - aeskeygenassist $4, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 96(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 112(%rdx) - aeskeygenassist $8, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 128(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 144(%rdx) - aeskeygenassist $16, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 160(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 176(%rdx) - aeskeygenassist $32, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 192(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 208(%rdx) - aeskeygenassist $64, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 224(%rdx) - pxor %xmm1, %xmm1 - pxor %xmm2, %xmm2 - pxor %xmm3, %xmm3 - pxor %xmm4, %xmm4 - ret - -.global aes256_keyhash_init -aes256_keyhash_init: - mov $579005069656919567, %r8 - pinsrq $0, %r8, %xmm4 - mov $283686952306183, %r8 - pinsrq $1, %r8, %xmm4 - pxor %xmm0, %xmm0 - movdqu %xmm0, 80(%rsi) - mov %rdi, %r8 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm4, %xmm0 - mov %rsi, %rcx - movdqu %xmm0, 32(%rcx) - movdqu %xmm6, %xmm0 - mov %r12, %rax - movdqu 32(%rcx), %xmm1 - 
movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 0(%rcx) - movdqu %xmm6, %xmm1 - movdqu %xmm6, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 16(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu 
%xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 48(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, 
%xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 64(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu 
%xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 96(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd 
$3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 112(%rcx) - movdqu %xmm0, %xmm6 - mov %rax, %r12 - ret - -.global gctr128_bytes -gctr128_bytes: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - movdqu 0(%r9), %xmm7 - mov %rdi, %rax - mov %rdx, %rbx - mov %rcx, %r13 - mov 72(%rsp), %rcx - mov %rcx, %rbp - imul $16, %rbp - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm8 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm8 - mov %rcx, %rdx - shr $2, %rdx - and $3, %rcx - cmp $0, %rdx - jbe L0 - mov %rax, %r9 - mov %rbx, %r10 - pshufb %xmm8, %xmm7 - movdqu %xmm7, %xmm9 - mov $579005069656919567, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pshufb %xmm0, %xmm9 - movdqu %xmm9, %xmm10 - pxor %xmm3, %xmm3 - mov $1, %rax - pinsrd $2, %eax, %xmm3 - paddd %xmm3, %xmm9 - mov $3, %rax - pinsrd $2, %eax, %xmm3 - mov $2, %rax - pinsrd $0, %eax, %xmm3 - paddd %xmm3, %xmm10 - pshufb %xmm8, %xmm9 - pshufb %xmm8, %xmm10 - pextrq $0, %xmm7, %rdi - mov $283686952306183, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pxor %xmm15, %xmm15 - mov $4, %rax - pinsrd $0, %eax, %xmm15 - mov $4, %rax - pinsrd $2, %eax, %xmm15 - jmp L3 -.balign 16 -L2: - pinsrq $0, %rdi, %xmm2 - pinsrq $0, %rdi, %xmm12 - pinsrq $0, %rdi, %xmm13 - pinsrq $0, %rdi, %xmm14 - shufpd $2, %xmm9, %xmm2 - shufpd $0, %xmm9, %xmm12 - shufpd $2, %xmm10, %xmm13 - shufpd $0, %xmm10, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - movdqu 0(%r8), %xmm3 - movdqu 16(%r8), %xmm4 - movdqu 32(%r8), %xmm5 - movdqu 48(%r8), %xmm6 - paddd %xmm15, %xmm9 - paddd %xmm15, %xmm10 - pxor %xmm3, %xmm2 - pxor %xmm3, %xmm12 - pxor %xmm3, %xmm13 - pxor %xmm3, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 64(%r8), %xmm3 - movdqu 80(%r8), %xmm4 - movdqu 96(%r8), %xmm5 - movdqu 112(%r8), %xmm6 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 128(%r8), %xmm3 - movdqu 144(%r8), %xmm4 - movdqu 160(%r8), %xmm5 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenclast %xmm5, %xmm2 - aesenclast %xmm5, %xmm12 - aesenclast %xmm5, %xmm13 - aesenclast %xmm5, %xmm14 - movdqu 0(%r9), 
%xmm7 - pxor %xmm7, %xmm2 - movdqu 16(%r9), %xmm7 - pxor %xmm7, %xmm12 - movdqu 32(%r9), %xmm7 - pxor %xmm7, %xmm13 - movdqu 48(%r9), %xmm7 - pxor %xmm7, %xmm14 - movdqu %xmm2, 0(%r10) - movdqu %xmm12, 16(%r10) - movdqu %xmm13, 32(%r10) - movdqu %xmm14, 48(%r10) - sub $1, %rdx - add $64, %r9 - add $64, %r10 -.balign 16 -L3: - cmp $0, %rdx - ja L2 - movdqu %xmm9, %xmm7 - pinsrq $0, %rdi, %xmm7 - pshufb %xmm8, %xmm7 - mov %r9, %rax - mov %r10, %rbx - jmp L1 -L0: -L1: - mov $0, %rdx - mov %rax, %r9 - mov %rbx, %r10 - pxor %xmm4, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - jmp L5 -.balign 16 -L4: - movdqu %xmm7, %xmm0 - pshufb %xmm8, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r9), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rdx - add $16, %r9 - add $16, %r10 - paddd %xmm4, %xmm7 -.balign 16 -L5: - cmp %rcx, %rdx - jne L4 - cmp %rbp, %rsi - jbe L6 - movdqu 0(%r13), %xmm1 - movdqu %xmm7, %xmm0 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm2 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm2 - pshufb %xmm2, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm1 - movdqu %xmm1, 0(%r13) - jmp L7 -L6: -L7: - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global gctr256_bytes -gctr256_bytes: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - movdqu 0(%r9), %xmm7 - mov %rdi, %rax - mov %rdx, %rbx - mov %rcx, %r13 - mov 72(%rsp), %rcx - mov %rcx, %rbp - imul $16, %rbp - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm8 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm8 - mov %rcx, %rdx - shr $2, %rdx - and $3, %rcx - cmp $0, %rdx - jbe L8 - mov %rax, %r9 - mov %rbx, %r10 - pshufb %xmm8, %xmm7 - movdqu %xmm7, %xmm9 - mov $579005069656919567, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pshufb %xmm0, %xmm9 - movdqu %xmm9, %xmm10 - pxor %xmm3, %xmm3 - mov $1, %rax - pinsrd $2, %eax, %xmm3 - paddd %xmm3, %xmm9 - mov $3, %rax - pinsrd $2, %eax, %xmm3 - mov $2, %rax - pinsrd $0, %eax, %xmm3 - paddd %xmm3, %xmm10 - pshufb %xmm8, %xmm9 - pshufb %xmm8, %xmm10 - pextrq $0, %xmm7, %rdi - mov $283686952306183, %rax - pinsrq $0, %rax, %xmm0 - mov $579005069656919567, %rax - pinsrq $1, %rax, %xmm0 - pxor %xmm15, %xmm15 - mov $4, %rax - pinsrd $0, %eax, %xmm15 - mov $4, %rax - pinsrd $2, %eax, %xmm15 - jmp L11 -.balign 16 -L10: - pinsrq $0, %rdi, %xmm2 - pinsrq $0, %rdi, %xmm12 - pinsrq $0, %rdi, %xmm13 - pinsrq $0, %rdi, %xmm14 - shufpd $2, %xmm9, 
%xmm2 - shufpd $0, %xmm9, %xmm12 - shufpd $2, %xmm10, %xmm13 - shufpd $0, %xmm10, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - movdqu 0(%r8), %xmm3 - movdqu 16(%r8), %xmm4 - movdqu 32(%r8), %xmm5 - movdqu 48(%r8), %xmm6 - paddd %xmm15, %xmm9 - paddd %xmm15, %xmm10 - pxor %xmm3, %xmm2 - pxor %xmm3, %xmm12 - pxor %xmm3, %xmm13 - pxor %xmm3, %xmm14 - pshufb %xmm0, %xmm9 - pshufb %xmm0, %xmm10 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 64(%r8), %xmm3 - movdqu 80(%r8), %xmm4 - movdqu 96(%r8), %xmm5 - movdqu 112(%r8), %xmm6 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 128(%r8), %xmm3 - movdqu 144(%r8), %xmm4 - movdqu 160(%r8), %xmm5 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - movdqu %xmm5, %xmm3 - movdqu 176(%r8), %xmm4 - movdqu 192(%r8), %xmm5 - movdqu 208(%r8), %xmm6 - aesenc %xmm3, %xmm2 - aesenc %xmm3, %xmm12 - aesenc %xmm3, %xmm13 - aesenc %xmm3, %xmm14 - aesenc %xmm4, %xmm2 - aesenc %xmm4, %xmm12 - aesenc %xmm4, %xmm13 - aesenc %xmm4, %xmm14 - aesenc %xmm5, %xmm2 - aesenc %xmm5, %xmm12 - aesenc %xmm5, %xmm13 - aesenc %xmm5, %xmm14 - aesenc %xmm6, %xmm2 - aesenc %xmm6, %xmm12 - aesenc %xmm6, %xmm13 - aesenc %xmm6, %xmm14 - movdqu 224(%r8), %xmm5 - aesenclast %xmm5, %xmm2 - aesenclast %xmm5, %xmm12 - aesenclast %xmm5, %xmm13 - aesenclast %xmm5, %xmm14 - movdqu 0(%r9), %xmm7 - pxor %xmm7, %xmm2 - movdqu 16(%r9), %xmm7 - pxor %xmm7, %xmm12 - movdqu 32(%r9), %xmm7 - pxor %xmm7, %xmm13 - movdqu 48(%r9), %xmm7 - pxor %xmm7, %xmm14 - movdqu %xmm2, 0(%r10) - movdqu %xmm12, 16(%r10) - movdqu %xmm13, 32(%r10) - movdqu %xmm14, 48(%r10) - sub $1, %rdx - add $64, %r9 - add $64, %r10 -.balign 16 -L11: - cmp $0, %rdx - ja L10 - movdqu %xmm9, %xmm7 - pinsrq $0, %rdi, %xmm7 - pshufb %xmm8, %xmm7 - mov %r9, %rax - mov %r10, %rbx - jmp L9 -L8: -L9: - mov $0, %rdx - mov %rax, %r9 - mov %rbx, %r10 - pxor %xmm4, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - jmp L13 -.balign 16 -L12: - movdqu %xmm7, %xmm0 - pshufb %xmm8, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r9), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rdx - add $16, %r9 - add $16, %r10 - paddd %xmm4, %xmm7 -.balign 16 -L13: - cmp %rcx, %rdx - jne L12 
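[annotation] The gctr128_bytes/gctr256_bytes routines above implement GCTR, the raw counter-mode layer of AES-GCM: the main loop keeps four counter blocks in flight (xmm2, xmm12, xmm13, xmm14), applying each round key to all four before loading the next, which hides the aesenc latency. The recurring immediates 579005069656919567 and 283686952306183 decode to 0x08090a0b0c0d0e0f and 0x0001020304050607, i.e. the two qwords of the pshufb byte-reversal mask used to move counters between memory order and the reflected internal representation. A minimal intrinsics sketch of the four-wide round ladder for the 128-bit variant (the 256-bit path is the same shape with four more rounds); rk is a hypothetical name for the expanded key schedule:

#include <wmmintrin.h>   /* AES-NI intrinsics */

/* Four-wide AES-128 round ladder, as in the L2 loop above: one round
   key is applied to four in-flight counter blocks before advancing to
   the next key.  rk[0..10] holds the expanded round keys (hypothetical
   name for what 0(%r8)..160(%r8) provides). */
static void aes128_ctr4(const __m128i rk[11], __m128i b[4])
{
    int r, i;
    for (i = 0; i < 4; i++)
        b[i] = _mm_xor_si128(b[i], rk[0]);          /* key whitening */
    for (r = 1; r < 10; r++)
        for (i = 0; i < 4; i++)
            b[i] = _mm_aesenc_si128(b[i], rk[r]);   /* rounds 1..9 */
    for (i = 0; i < 4; i++)
        b[i] = _mm_aesenclast_si128(b[i], rk[10]);  /* final round */
}

The keystream blocks produced this way are then XORed against the input, exactly as the movdqu/pxor/movdqu triples following the aesenclast group do.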
- cmp %rbp, %rsi - jbe L14 - movdqu 0(%r13), %xmm1 - movdqu %xmm7, %xmm0 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm2 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm2 - pshufb %xmm2, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm1 - movdqu %xmm1, 0(%r13) - jmp L15 -L14: -L15: - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global compute_iv_stdcall -compute_iv_stdcall: - cmp $12, %rsi - jne L16 - cmp $12, %rsi - jne L18 - movdqu 0(%r8), %xmm0 - mov $579005069656919567, %rax - pinsrq $0, %rax, %xmm1 - mov $283686952306183, %rax - pinsrq $1, %rax, %xmm1 - pshufb %xmm1, %xmm0 - mov $1, %rax - pinsrd $0, %eax, %xmm0 - movdqu %xmm0, 0(%rcx) - jmp L19 -L18: - mov %rcx, %rax - add $32, %r9 - mov %r8, %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L21 -.balign 16 -L20: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, 
%xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L21: - cmp $6, %rdx - jae L20 - cmp $0, %rdx - jbe L22 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L24 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L25 -L24: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L26 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L28 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L30 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L31 -L30: -L31: - jmp L29 -L28: -L29: - jmp L27 -L26: -L27: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L25: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L23 
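[annotation] compute_iv_stdcall derives GCM's initial counter block J0: the cmp $12 fast path handles a 96-bit IV (J0 = IV || 0x00000001 per the spec), while any other IV length is absorbed with GHASH, ending in the reduction sequence just seen, which folds a 256-bit carry-less product back into GF(2^128) against the reflected polynomial constant 0xC2000000_00000000 (the pinsrd $3, $3254779904 immediate). A sketch of one field multiply in the same reflected representation, mirroring the vpclmulqdq/vpalignr pattern above; the function name is hypothetical:

#include <wmmintrin.h>   /* _mm_clmulepi64_si128 (PCLMULQDQ) */
#include <tmmintrin.h>   /* _mm_alignr_epi8 */

/* One GHASH multiply: schoolbook carry-less multiply into a wide
   product (lo, mid, hi), then two folds against the 0xC2... constant,
   as in the vpslldq/vpalignr/vpclmulqdq $16 sequence above. */
static __m128i gf128_mul(__m128i a, __m128i b)
{
    const __m128i poly = _mm_set_epi64x((long long)0xC200000000000000ULL, 0);
    __m128i lo  = _mm_clmulepi64_si128(a, b, 0x00);
    __m128i hi  = _mm_clmulepi64_si128(a, b, 0x11);
    __m128i mid = _mm_xor_si128(_mm_clmulepi64_si128(a, b, 0x10),
                                _mm_clmulepi64_si128(a, b, 0x01));
    __m128i t;
    lo = _mm_xor_si128(lo, _mm_slli_si128(mid, 8));   /* fold middle terms */
    hi = _mm_xor_si128(hi, _mm_srli_si128(mid, 8));
    t  = _mm_alignr_epi8(lo, lo, 8);                  /* rotate 64 bits */
    lo = _mm_xor_si128(_mm_clmulepi64_si128(lo, poly, 0x10), t);
    t  = _mm_alignr_epi8(lo, lo, 8);                  /* second fold */
    lo = _mm_xor_si128(_mm_clmulepi64_si128(lo, poly, 0x10), t);
    return _mm_xor_si128(lo, hi);                     /* reduced result */
}

The unrolled chains above amortize this further by multiplying six blocks against precomputed powers of H and deferring the reduction to a single fold per group.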
-L22: -L23: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L32 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L34 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L35 -L34: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L35: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L33 -L32: -L33: - mov %rax, %rcx - mov $0, %r11 - mov %rsi, %r13 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu %xmm8, 0(%rcx) -L19: - jmp L17 -L16: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - cmp $12, %rsi - jne L36 - movdqu 0(%r8), %xmm0 - mov $579005069656919567, %rax - pinsrq $0, %rax, %xmm1 - mov $283686952306183, %rax - pinsrq $1, %rax, %xmm1 - pshufb %xmm1, %xmm0 - mov $1, %rax - pinsrd $0, %eax, %xmm0 - movdqu %xmm0, 0(%rcx) - jmp L37 -L36: - mov %rcx, %rax - add $32, %r9 - mov %r8, %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L39 -.balign 16 -L38: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 
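[annotation] Two tail steps sit in the block above. First, a partial trailing block is masked down to its valid bytes before entering GHASH: the shl %cl / sub $1 sequence builds a (1 << 8*n) - 1 byte mask over the low or high qword. Second, the imul $8 / pinsrq pair assembles GHASH's final input, the bit lengths of the processed data; for compute_iv this is a zero field paired with the IV bit length, matching the spec's 0^64 || [len(IV)]_64 block. A portable sketch of that lengths block in the spec's big-endian layout (the name is hypothetical; the assembly keeps the equivalent value in the reflected representation instead):

#include <stdint.h>

/* Build the 16-byte GHASH lengths block: bit length of the AAD in the
   first 8 bytes, bit length of the ciphertext in the last 8, both
   big-endian (the imul $8 above is the same bytes-to-bits scaling). */
static void gcm_length_block(uint8_t out[16],
                             uint64_t aad_len, uint64_t txt_len)
{
    uint64_t aad_bits = aad_len * 8, txt_bits = txt_len * 8;
    int i;
    for (i = 0; i < 8; i++) {
        out[7 - i]  = (uint8_t)(aad_bits >> (8 * i));
        out[15 - i] = (uint8_t)(txt_bits >> (8 * i));
    }
}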
- sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L39: - cmp $6, %rdx - jae L38 - cmp $0, %rdx - jbe L40 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L42 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L43 -L42: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L44 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L46 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L48 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, 
%xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L49 -L48: -L49: - jmp L47 -L46: -L47: - jmp L45 -L44: -L45: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L43: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L41 -L40: -L41: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L50 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L52 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L53 -L52: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L53: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L51 -L50: -L51: - mov %rax, %rcx - mov $0, %r11 - mov %rsi, %r13 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu %xmm8, 0(%rcx) -L37: - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 -L17: - ret - -.global gcm128_encrypt_opt -gcm128_encrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - mov 144(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 72(%rsp), %rbx - mov %rdx, 
%rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L55 -.balign 16 -L54: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L55: - cmp $6, %rdx - jae L54 - cmp $0, %rdx - jbe L56 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L58 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L59 -L58: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, 
%xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L60 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L62 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L64 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L65 -L64: -L65: - jmp L63 -L62: -L63: - jmp L61 -L60: -L61: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L59: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L57 -L56: -L57: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L66 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L68 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L69 -L68: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L69: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L67 -L66: -L67: - mov 80(%rsp), %rdi - mov 88(%rsp), %rsi - mov 96(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, 
%rdx - jne L70 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L71 -L70: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rsi), %r14 - movdqu -128(%rcx), %xmm4 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - movdqu -112(%rcx), %xmm15 - mov %rcx, %r12 - sub $96, %r12 - vpxor %xmm4, %xmm1, %xmm9 - add $6, %rbx - cmp $256, %rbx - jae L72 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm2, %xmm11, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm2, %xmm12, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpaddd %xmm2, %xmm14, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 - jmp L73 -L72: - sub $256, %rbx - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpshufb %xmm0, %xmm14, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpshufb %xmm0, %xmm1, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 -L73: - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 16(%rcx), %xmm15 - movdqu 32(%rcx), %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 0(%rdi), %xmm3, %xmm4 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor 16(%rdi), %xmm3, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 32(%rdi), %xmm3, %xmm6 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 48(%rdi), %xmm3, 
%xmm8 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 64(%rdi), %xmm3, %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 80(%rdi), %xmm3, %xmm3 - lea 96(%rdi), %rdi - vaesenclast %xmm4, %xmm9, %xmm9 - vaesenclast %xmm5, %xmm10, %xmm10 - vaesenclast %xmm6, %xmm11, %xmm11 - vaesenclast %xmm8, %xmm12, %xmm12 - vaesenclast %xmm2, %xmm13, %xmm13 - vaesenclast %xmm3, %xmm14, %xmm14 - movdqu %xmm9, 0(%rsi) - movdqu %xmm10, 16(%rsi) - movdqu %xmm11, 32(%rsi) - movdqu %xmm12, 48(%rsi) - movdqu %xmm13, 64(%rsi) - movdqu %xmm14, 80(%rsi) - lea 96(%rsi), %rsi - vpshufb %xmm0, %xmm9, %xmm8 - vpshufb %xmm0, %xmm10, %xmm2 - movdqu %xmm8, 112(%rbp) - vpshufb %xmm0, %xmm11, %xmm4 - movdqu %xmm2, 96(%rbp) - vpshufb %xmm0, %xmm12, %xmm5 - movdqu %xmm4, 80(%rbp) - vpshufb %xmm0, %xmm13, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm14, %xmm7 - movdqu %xmm6, 48(%rbp) - movdqu -128(%rcx), %xmm4 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - movdqu -112(%rcx), %xmm15 - mov %rcx, %r12 - sub $96, %r12 - vpxor %xmm4, %xmm1, %xmm9 - add $6, %rbx - cmp $256, %rbx - jae L74 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm2, %xmm11, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm2, %xmm12, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpaddd %xmm2, %xmm14, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 - jmp L75 -L74: - sub $256, %rbx - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpshufb %xmm0, %xmm14, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpshufb %xmm0, %xmm1, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 -L75: - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, 
%xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 16(%rcx), %xmm15 - movdqu 32(%rcx), %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 0(%rdi), %xmm3, %xmm4 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor 16(%rdi), %xmm3, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 32(%rdi), %xmm3, %xmm6 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 48(%rdi), %xmm3, %xmm8 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 64(%rdi), %xmm3, %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 80(%rdi), %xmm3, %xmm3 - lea 96(%rdi), %rdi - vaesenclast %xmm4, %xmm9, %xmm9 - vaesenclast %xmm5, %xmm10, %xmm10 - vaesenclast %xmm6, %xmm11, %xmm11 - vaesenclast %xmm8, %xmm12, %xmm12 - vaesenclast %xmm2, %xmm13, %xmm13 - vaesenclast %xmm3, %xmm14, %xmm14 - movdqu %xmm9, 0(%rsi) - movdqu %xmm10, 16(%rsi) - movdqu %xmm11, 32(%rsi) - movdqu %xmm12, 48(%rsi) - movdqu %xmm13, 64(%rsi) - movdqu %xmm14, 80(%rsi) - lea 96(%rsi), %rsi - sub $12, %rdx - movdqu 32(%rbp), %xmm8 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - jmp L77 -.balign 16 -L76: - add $6, %rbx - cmp $256, %rbx - jb L78 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L79 -L78: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L79: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 
32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - 
vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - add $96, %r14 - cmp $0, %rdx - jbe L80 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L81 -L80: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L81: -.balign 16 -L77: - cmp $0, %rdx - ja L76 - movdqu 32(%rbp), %xmm7 - movdqu %xmm1, 32(%rbp) - pxor %xmm4, %xmm4 - movdqu %xmm4, 16(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - 
vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm0 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm0 - movdqu %xmm9, -96(%rsi) - vpshufb %xmm0, %xmm9, %xmm9 - vpxor %xmm7, %xmm1, %xmm1 - movdqu %xmm10, -80(%rsi) - vpshufb %xmm0, %xmm10, %xmm10 - movdqu %xmm11, -64(%rsi) - vpshufb %xmm0, %xmm11, %xmm11 - movdqu %xmm12, -48(%rsi) - vpshufb %xmm0, %xmm12, %xmm12 - movdqu %xmm13, -32(%rsi) - vpshufb %xmm0, %xmm13, %xmm13 - movdqu %xmm14, -16(%rsi) - vpshufb %xmm0, %xmm14, %xmm14 - pxor %xmm4, %xmm4 - movdqu %xmm14, %xmm7 - movdqu %xmm4, 16(%rbp) - movdqu %xmm13, 48(%rbp) - movdqu %xmm12, 64(%rbp) - movdqu %xmm11, 80(%rbp) - movdqu %xmm10, 96(%rbp) - movdqu %xmm9, 112(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - sub $128, %rcx -L71: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 104(%rsp), %rax - mov 112(%rsp), %rdi - mov 120(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - 
mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L83 -.balign 16 -L82: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L83: - cmp %rdx, %rbx - jne L82 - mov %rdi, %r11 - jmp L85 -.balign 16 -L84: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L85: - cmp $6, %rdx - jae L84 - 
cmp $0, %rdx - jbe L86 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L88 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L89 -L88: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L90 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L92 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L94 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L95 -L94: -L95: - jmp L93 -L92: -L93: - jmp L91 -L90: -L91: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L89: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L87 -L86: -L87: - add 96(%rsp), %r14 - imul $16, %r14 - mov 136(%rsp), %r13 - cmp %r14, %r13 - jbe L96 - mov 128(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%rax), %xmm4 - pxor %xmm4, %xmm0 - movdqu %xmm0, 0(%rax) - cmp $8, 
%r10 - jae L98 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L99 -L98: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L99: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L97 -L96: -L97: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 152(%rsp), %r15 - movdqu %xmm8, 0(%r15) - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global gcm256_encrypt_opt -gcm256_encrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - mov 144(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 72(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L101 -.balign 16 -L100: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu 
%xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L101: - cmp $6, %rdx - jae L100 - cmp $0, %rdx - jbe L102 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L104 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L105 -L104: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L106 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L108 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, 
%xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L110 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L111 -L110: -L111: - jmp L109 -L108: -L109: - jmp L107 -L106: -L107: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L105: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L103 -L102: -L103: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L112 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L114 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L115 -L114: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L115: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L113 -L112: -L113: - mov 80(%rsp), %rdi - mov 88(%rsp), %rsi - mov 96(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L116 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L117 -L116: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rsi), %r14 - movdqu -128(%rcx), %xmm4 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - movdqu -112(%rcx), %xmm15 - mov %rcx, %r12 - sub $96, %r12 - vpxor %xmm4, %xmm1, %xmm9 - add $6, %rbx - cmp $256, %rbx - jae L118 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm2, %xmm11, 
%xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm2, %xmm12, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpaddd %xmm2, %xmm14, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 - jmp L119 -L118: - sub $256, %rbx - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpshufb %xmm0, %xmm14, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpshufb %xmm0, %xmm1, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 -L119: - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc 
%xmm15, %xmm14, %xmm14 - movdqu 80(%rcx), %xmm15 - movdqu 96(%rcx), %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 0(%rdi), %xmm3, %xmm4 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor 16(%rdi), %xmm3, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 32(%rdi), %xmm3, %xmm6 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 48(%rdi), %xmm3, %xmm8 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 64(%rdi), %xmm3, %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 80(%rdi), %xmm3, %xmm3 - lea 96(%rdi), %rdi - vaesenclast %xmm4, %xmm9, %xmm9 - vaesenclast %xmm5, %xmm10, %xmm10 - vaesenclast %xmm6, %xmm11, %xmm11 - vaesenclast %xmm8, %xmm12, %xmm12 - vaesenclast %xmm2, %xmm13, %xmm13 - vaesenclast %xmm3, %xmm14, %xmm14 - movdqu %xmm9, 0(%rsi) - movdqu %xmm10, 16(%rsi) - movdqu %xmm11, 32(%rsi) - movdqu %xmm12, 48(%rsi) - movdqu %xmm13, 64(%rsi) - movdqu %xmm14, 80(%rsi) - lea 96(%rsi), %rsi - vpshufb %xmm0, %xmm9, %xmm8 - vpshufb %xmm0, %xmm10, %xmm2 - movdqu %xmm8, 112(%rbp) - vpshufb %xmm0, %xmm11, %xmm4 - movdqu %xmm2, 96(%rbp) - vpshufb %xmm0, %xmm12, %xmm5 - movdqu %xmm4, 80(%rbp) - vpshufb %xmm0, %xmm13, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm14, %xmm7 - movdqu %xmm6, 48(%rbp) - movdqu -128(%rcx), %xmm4 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - movdqu -112(%rcx), %xmm15 - mov %rcx, %r12 - sub $96, %r12 - vpxor %xmm4, %xmm1, %xmm9 - add $6, %rbx - cmp $256, %rbx - jae L120 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm2, %xmm11, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm2, %xmm12, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpaddd %xmm2, %xmm14, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 - jmp L121 -L120: - sub $256, %rbx - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpshufb %xmm0, %xmm14, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpshufb %xmm0, %xmm1, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 -L121: - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - 
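# Reading note: the interleaved vaesenc chains through this hunk are 6-way
# parallel AES-256 CTR -- round keys at -128(%rcx) through 96(%rcx), 15 keys
# for AES-256's 14 rounds -- and the last round key is pre-XORed into the
# plaintext loaded from (%rdi) (vpxor 0(%rdi), %xmm3, ...), so each
# vaesenclast emits a finished ciphertext block directly.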
movdqu -32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 80(%rcx), %xmm15 - movdqu 96(%rcx), %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 0(%rdi), %xmm3, %xmm4 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor 16(%rdi), %xmm3, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 32(%rdi), %xmm3, %xmm6 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 48(%rdi), %xmm3, %xmm8 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 64(%rdi), %xmm3, %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 80(%rdi), %xmm3, %xmm3 - lea 96(%rdi), %rdi - vaesenclast %xmm4, %xmm9, %xmm9 - vaesenclast %xmm5, %xmm10, %xmm10 - vaesenclast %xmm6, %xmm11, %xmm11 - vaesenclast %xmm8, %xmm12, %xmm12 - vaesenclast %xmm2, %xmm13, %xmm13 - vaesenclast %xmm3, %xmm14, %xmm14 - movdqu %xmm9, 0(%rsi) - movdqu %xmm10, 16(%rsi) - movdqu %xmm11, 32(%rsi) - movdqu %xmm12, 48(%rsi) - movdqu %xmm13, 64(%rsi) - movdqu %xmm14, 80(%rsi) - lea 96(%rsi), %rsi - sub $12, %rdx - movdqu 32(%rbp), %xmm8 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - jmp L123 -.balign 16 -L122: - add $6, %rbx - cmp $256, %rbx - jb L124 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb 
%xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L125 -L124: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L125: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc 
%xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 80(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 96(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - add $96, %r14 - cmp $0, %rdx - jbe L126 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L127 -L126: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, 
%xmm8 -L127: -.balign 16 -L123: - cmp $0, %rdx - ja L122 - movdqu 32(%rbp), %xmm7 - movdqu %xmm1, 32(%rbp) - pxor %xmm4, %xmm4 - movdqu %xmm4, 16(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm0 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm0 - movdqu %xmm9, -96(%rsi) - vpshufb %xmm0, %xmm9, %xmm9 - vpxor %xmm7, %xmm1, %xmm1 - movdqu %xmm10, -80(%rsi) - vpshufb %xmm0, %xmm10, %xmm10 - movdqu %xmm11, -64(%rsi) - vpshufb %xmm0, %xmm11, %xmm11 - movdqu %xmm12, -48(%rsi) - vpshufb %xmm0, %xmm12, %xmm12 - movdqu %xmm13, -32(%rsi) - vpshufb %xmm0, %xmm13, %xmm13 - movdqu %xmm14, -16(%rsi) - vpshufb %xmm0, %xmm14, %xmm14 - pxor %xmm4, %xmm4 - movdqu %xmm14, %xmm7 - movdqu %xmm4, 16(%rbp) - movdqu %xmm13, 48(%rbp) - movdqu %xmm12, 64(%rbp) - movdqu %xmm11, 80(%rbp) - movdqu %xmm10, 96(%rbp) - movdqu %xmm9, 112(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, 
%xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - sub $128, %rcx -L117: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 104(%rsp), %rax - mov 112(%rsp), %rdi - mov 120(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L129 -.balign 16 -L128: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L129: - cmp %rdx, %rbx - jne L128 - mov %rdi, %r11 - jmp L131 -.balign 16 -L130: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - 
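# Reading note: each vpclmulqdq $0/$16/$1/$17 quad below multiplies one
# byte-reflected 128-bit block (pshufb with the byte-reversal mask kept in
# xmm9) by a precomputed power of the GHASH key from the table at (%r9);
# the low/middle/high partial products accumulate in xmm4, xmm6 and xmm7
# until the final reduction.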
movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L131: - cmp $6, %rdx - jae L130 - cmp $0, %rdx - jbe L132 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L134 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L135 -L134: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L136 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L138 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, 
%xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L140 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L141 -L140: -L141: - jmp L139 -L138: -L139: - jmp L137 -L136: -L137: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L135: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L133 -L132: -L133: - add 96(%rsp), %r14 - imul $16, %r14 - mov 136(%rsp), %r13 - cmp %r14, %r13 - jbe L142 - mov 128(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%rax), %xmm4 - pxor %xmm4, %xmm0 - movdqu %xmm0, 0(%rax) - cmp $8, %r10 - jae L144 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L145 -L144: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L145: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 
%xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L143 -L142: -L143: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 152(%rsp), %r15 - movdqu %xmm8, 0(%r15) - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global gcm128_decrypt_opt -gcm128_decrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - mov 144(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 72(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L147 -.balign 16 -L146: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - 
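# Reading note: the reduction step that follows builds 0xC2000000 (decimal
# 3254779904) into the top dword of xmm3 -- the GF(2^128) reduction constant
# for GCM's polynomial x^128 + x^7 + x^2 + x + 1 -- then folds the 256-bit
# carry-less product back to 128 bits in xmm8 via two vpclmulqdq $16 /
# vpalignr $8 steps.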
vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L147: - cmp $6, %rdx - jae L146 - cmp $0, %rdx - jbe L148 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L150 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L151 -L150: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L152 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L154 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L156 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L157 -L156: -L157: - jmp L155 -L154: -L155: - jmp L153 -L152: -L153: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, 
%xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L151: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L149 -L148: -L149: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L158 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L160 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L161 -L160: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L161: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L159 -L158: -L159: - mov 80(%rsp), %rdi - mov 88(%rsp), %rsi - mov 96(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L162 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L163 -L162: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rdi), %r14 - movdqu 32(%rbp), %xmm8 - movdqu 80(%rdi), %xmm7 - movdqu 64(%rdi), %xmm4 - movdqu 48(%rdi), %xmm5 - movdqu 32(%rdi), %xmm6 - vpshufb %xmm0, %xmm7, %xmm7 - movdqu 16(%rdi), %xmm2 - vpshufb %xmm0, %xmm4, %xmm4 - movdqu 0(%rdi), %xmm3 - vpshufb %xmm0, %xmm5, %xmm5 - movdqu %xmm4, 48(%rbp) - vpshufb %xmm0, %xmm6, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm2, %xmm2 - movdqu %xmm6, 80(%rbp) - vpshufb %xmm0, %xmm3, %xmm3 - movdqu %xmm2, 96(%rbp) - movdqu %xmm3, 112(%rbp) - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - cmp $6, %rdx - jne L164 - sub $96, %r14 - jmp L165 -L164: -L165: - jmp L167 -.balign 16 -L166: - add $6, %rbx - cmp $256, %rbx - jb L168 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor 
%xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L169 -L168: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L169: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 
80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - cmp $6, %rdx - jbe L170 - add $96, %r14 - jmp L171 -L170: -L171: - cmp $0, %rdx - jbe L172 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L173 -L172: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L173: -.balign 16 -L167: - cmp $0, %rdx - ja L166 - movdqu %xmm1, 32(%rbp) - movdqu %xmm9, -96(%rsi) - movdqu %xmm10, -80(%rsi) - movdqu %xmm11, -64(%rsi) - movdqu %xmm12, -48(%rsi) - movdqu %xmm13, -32(%rsi) - movdqu %xmm14, -16(%rsi) - sub $128, %rcx -L163: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 104(%rsp), %rax - mov 112(%rsp), %rdi - mov 
120(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - mov %rdi, %rbx - mov %rdx, %r12 - mov %rax, %rdi - mov %rdi, %r11 - jmp L175 -.balign 16 -L174: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L175: - cmp $6, %rdx - jae L174 - cmp $0, %rdx - jbe L176 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L178 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L179 -L178: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, 
%xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L180 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L182 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L184 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L185 -L184: -L185: - jmp L183 -L182: -L183: - jmp L181 -L180: -L181: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L179: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L177 -L176: -L177: - mov %rbx, %rdi - mov %r12, %rdx - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L187 -.balign 16 -L186: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L187: - cmp %rdx, %rbx - jne L186 - add 96(%rsp), %r14 - imul $16, %r14 - mov 136(%rsp), %r13 - cmp %r14, %r13 - jbe L188 - mov 128(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu 0(%rax), %xmm0 - movdqu %xmm0, %xmm10 - cmp $8, %r10 - jae L190 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L191 -L190: - mov %r10, %rcx - sub $8, 
%rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L191: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm10 - movdqu %xmm10, 0(%rax) - jmp L189 -L188: -L189: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 152(%rsp), %r15 - movdqu 0(%r15), %xmm0 - pcmpeqd %xmm8, %xmm0 - pextrq $0, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rax - adc $0, %rax - pextrq $1, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rdx - adc $0, %rdx - add %rdx, %rax - mov %rax, %rcx - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - mov %rcx, %rax - ret - -.global gcm256_decrypt_opt -gcm256_decrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - mov 144(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 
72(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L193 -.balign 16 -L192: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L193: - cmp $6, %rdx - jae L192 - cmp $0, %rdx - jbe L194 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L196 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L197 -L196: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - 
pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L198 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L200 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L202 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L203 -L202: -L203: - jmp L201 -L200: -L201: - jmp L199 -L198: -L199: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L197: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L195 -L194: -L195: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L204 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L206 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L207 -L206: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L207: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L205 -L204: -L205: - mov 80(%rsp), %rdi - mov 88(%rsp), %rsi - mov 96(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - 
pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L208 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L209 -L208: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rdi), %r14 - movdqu 32(%rbp), %xmm8 - movdqu 80(%rdi), %xmm7 - movdqu 64(%rdi), %xmm4 - movdqu 48(%rdi), %xmm5 - movdqu 32(%rdi), %xmm6 - vpshufb %xmm0, %xmm7, %xmm7 - movdqu 16(%rdi), %xmm2 - vpshufb %xmm0, %xmm4, %xmm4 - movdqu 0(%rdi), %xmm3 - vpshufb %xmm0, %xmm5, %xmm5 - movdqu %xmm4, 48(%rbp) - vpshufb %xmm0, %xmm6, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm2, %xmm2 - movdqu %xmm6, 80(%rbp) - vpshufb %xmm0, %xmm3, %xmm3 - movdqu %xmm2, 96(%rbp) - movdqu %xmm3, 112(%rbp) - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - cmp $6, %rdx - jne L210 - sub $96, %r14 - jmp L211 -L210: -L211: - jmp L213 -.balign 16 -L212: - add $6, %rbx - cmp $256, %rbx - jb L214 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L215 -L214: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L215: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq 
$1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm1 - vaesenc 
%xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 80(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 96(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - cmp $6, %rdx - jbe L216 - add $96, %r14 - jmp L217 -L216: -L217: - cmp $0, %rdx - jbe L218 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L219 -L218: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L219: -.balign 16 -L213: - cmp $0, %rdx - ja L212 - movdqu %xmm1, 32(%rbp) - movdqu %xmm9, -96(%rsi) - movdqu %xmm10, -80(%rsi) - movdqu %xmm11, -64(%rsi) - movdqu %xmm12, -48(%rsi) - movdqu %xmm13, -32(%rsi) - movdqu %xmm14, -16(%rsi) - sub $128, %rcx -L209: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 104(%rsp), %rax - mov 112(%rsp), %rdi - mov 120(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - mov %rdi, %rbx - mov %rdx, %r12 - mov %rax, %rdi - mov %rdi, %r11 - jmp L221 -.balign 16 -L220: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, 
%xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L221: - cmp $6, %rdx - jae L220 - cmp $0, %rdx - jbe L222 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L224 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L225 -L224: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L226 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L228 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L230 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 
- vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L231 -L230: -L231: - jmp L229 -L228: -L229: - jmp L227 -L226: -L227: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L225: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L223 -L222: -L223: - mov %rbx, %rdi - mov %r12, %rdx - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L233 -.balign 16 -L232: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L233: - cmp %rdx, %rbx - jne L232 - add 96(%rsp), %r14 - imul $16, %r14 - mov 136(%rsp), %r13 - cmp %r14, %r13 - jbe L234 - mov 128(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu 0(%rax), %xmm0 - movdqu %xmm0, %xmm10 - cmp $8, %r10 - jae L236 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L237 -L236: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L237: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc 
%xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm10 - movdqu %xmm10, 0(%rax) - jmp L235 -L234: -L235: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 152(%rsp), %r15 - movdqu 0(%r15), %xmm0 - pcmpeqd %xmm8, %xmm0 - pextrq $0, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rax - adc $0, %rax - pextrq $1, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rdx - adc $0, %rdx - add %rdx, %rax - mov %rax, %rcx - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - mov %rcx, %rax - ret - -.section .note.GNU-stack,"",%progbits diff --git a/dist/c89-compatible/aesgcm-x86_64-mingw.S b/dist/c89-compatible/aesgcm-x86_64-mingw.S deleted file mode 100644 index b42c8a7b4c..0000000000 --- a/dist/c89-compatible/aesgcm-x86_64-mingw.S +++ /dev/null @@ -1,8705 +0,0 @@ -.text -.global aes128_key_expansion -aes128_key_expansion: - movdqu 0(%rcx), %xmm1 - movdqu %xmm1, 0(%rdx) - aeskeygenassist $1, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 16(%rdx) - aeskeygenassist $2, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, 
%xmm1 - movdqu %xmm1, 32(%rdx) - aeskeygenassist $4, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 48(%rdx) - aeskeygenassist $8, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 64(%rdx) - aeskeygenassist $16, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 80(%rdx) - aeskeygenassist $32, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 96(%rdx) - aeskeygenassist $64, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 112(%rdx) - aeskeygenassist $128, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 128(%rdx) - aeskeygenassist $27, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 144(%rdx) - aeskeygenassist $54, %xmm1, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - vpslldq $4, %xmm1, %xmm3 - pxor %xmm3, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 160(%rdx) - pxor %xmm1, %xmm1 - pxor %xmm2, %xmm2 - pxor %xmm3, %xmm3 - ret - -.global aes128_keyhash_init -aes128_keyhash_init: - mov $579005069656919567, %r8 - pinsrq $0, %r8, %xmm4 - mov $283686952306183, %r8 - pinsrq $1, %r8, %xmm4 - pxor %xmm0, %xmm0 - movdqu %xmm0, 80(%rdx) - mov %rcx, %r8 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm4, %xmm0 - mov %rdx, %rcx - movdqu %xmm0, 32(%rcx) - movdqu %xmm6, %xmm0 - mov %r12, %rax - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 0(%rcx) - movdqu %xmm6, %xmm1 - movdqu %xmm6, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq 
$16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 16(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, 
%xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 48(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - 
mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 64(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 
- pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd %xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 96(%rcx) - movdqu %xmm6, %xmm2 - movdqu 32(%rcx), %xmm1 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm6 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - movdqu %xmm1, %xmm5 - pclmulqdq $16, %xmm2, %xmm1 - movdqu %xmm1, %xmm3 - movdqu %xmm5, %xmm1 - pclmulqdq $1, %xmm2, %xmm1 - movdqu %xmm1, %xmm4 - movdqu %xmm5, %xmm1 - pclmulqdq $0, %xmm2, %xmm1 - pclmulqdq $17, %xmm2, %xmm5 - movdqu %xmm5, %xmm2 - movdqu %xmm1, %xmm5 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm4, %xmm1 - mov $0, %r12 - pinsrd $0, %r12d, %xmm1 - pshufd $14, %xmm1, %xmm1 - pxor %xmm1, %xmm2 - movdqu %xmm3, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm1 - pshufd $79, %xmm1, %xmm1 - mov $0, %r12 - pinsrd $3, %r12d, %xmm4 - pshufd $79, %xmm4, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm5, %xmm1 - movdqu %xmm1, %xmm3 - psrld $31, %xmm3 - movdqu %xmm2, %xmm4 - psrld $31, %xmm4 - pslld $1, %xmm1 - pslld $1, %xmm2 - vpslldq $4, %xmm3, %xmm5 - vpslldq $4, %xmm4, %xmm4 - mov $0, %r12 - pinsrd $0, %r12d, %xmm3 - pshufd $3, %xmm3, %xmm3 - pxor %xmm4, %xmm3 - pxor %xmm5, %xmm1 - pxor %xmm3, %xmm2 - movdqu %xmm2, %xmm5 - pxor %xmm2, %xmm2 - mov $3774873600, %r12 - pinsrd $3, %r12d, %xmm2 - pclmulqdq $17, %xmm2, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pxor %xmm5, %xmm1 - pxor %xmm6, %xmm1 - movdqu %xmm1, %xmm6 - movdqu %xmm1, %xmm3 - pxor %xmm4, %xmm4 - pxor %xmm5, %xmm5 - mov $3254779904, %r12 - pinsrd $3, %r12d, %xmm4 - mov $1, %r12 - pinsrd $0, %r12d, %xmm4 - mov $2147483648, %r12 - pinsrd $3, %r12d, %xmm5 - movdqu %xmm3, %xmm1 - movdqu %xmm1, %xmm2 - psrld $31, %xmm2 - pslld $1, %xmm1 - vpslldq $4, %xmm2, %xmm2 - pxor %xmm2, %xmm1 - pand %xmm5, %xmm3 - pcmpeqd 
%xmm5, %xmm3 - pshufd $255, %xmm3, %xmm3 - pand %xmm4, %xmm3 - vpxor %xmm3, %xmm1, %xmm1 - movdqu %xmm1, 112(%rcx) - movdqu %xmm0, %xmm6 - mov %rax, %r12 - ret - -.global aes256_key_expansion -aes256_key_expansion: - movdqu 0(%rcx), %xmm1 - movdqu 16(%rcx), %xmm3 - movdqu %xmm1, 0(%rdx) - movdqu %xmm3, 16(%rdx) - aeskeygenassist $1, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 32(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 48(%rdx) - aeskeygenassist $2, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 64(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 80(%rdx) - aeskeygenassist $4, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 96(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 112(%rdx) - aeskeygenassist $8, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 128(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 144(%rdx) - aeskeygenassist $16, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 160(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 176(%rdx) - aeskeygenassist $32, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 192(%rdx) - aeskeygenassist $0, %xmm1, %xmm2 - pshufd $170, %xmm2, %xmm2 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - vpslldq $4, %xmm3, %xmm4 - pxor %xmm4, %xmm3 - pxor %xmm2, %xmm3 - movdqu %xmm3, 208(%rdx) - aeskeygenassist $64, %xmm3, %xmm2 - pshufd $255, %xmm2, %xmm2 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - vpslldq $4, %xmm1, %xmm4 - pxor %xmm4, %xmm1 - pxor %xmm2, %xmm1 - movdqu %xmm1, 
-.global aes256_keyhash_init
-aes256_keyhash_init:
  [… elided: loads the byte-swap mask, zeroes the scratch slot at 80(%rdx),
  AES-256-encrypts the all-zero block under the round keys at 0(%r8) through
  224(%r8) to obtain the GHASH key H, stores it byte-reversed at 32(%rcx),
  then derives the remaining key powers with the same PCLMULQDQ
  multiply/reduce sequence, storing them at 0, 16, 48, 64, 96 and 112(%rcx)
  before restoring %xmm6/%r12 and returning …]
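The repeated multiply/shift/reduce blocks (constants 0xC2000000 and 0xE1000000, the two halves of the GCM reduction by x^128 + x^7 + x^2 + x + 1) precompute powers of H so the GHASH loops further down can fold several blocks per iteration. A portable sketch of the underlying field arithmetic — bit-serial here, where the deleted code uses PCLMULQDQ; the names and the six-power count are an assumption read off the loop structure, not a HACL* API:

```c
/* GF(2^128) multiply with the GCM polynomial, per NIST SP 800-38D. */
#include <stdint.h>

typedef struct { uint64_t hi, lo; } gf128;  /* MSB-first bit order, as in GCM */

static gf128 gf128_mul(gf128 x, gf128 y)
{
    gf128 z = {0, 0}, v = x;
    for (int i = 0; i < 128; i++) {
        uint64_t bit = (i < 64) ? (y.hi >> (63 - i)) & 1
                                : (y.lo >> (127 - i)) & 1;
        if (bit) { z.hi ^= v.hi; z.lo ^= v.lo; }
        uint64_t carry = v.lo & 1;            /* shift v right one bit … */
        v.lo = (v.lo >> 1) | (v.hi << 63);
        v.hi >>= 1;
        if (carry) v.hi ^= 0xE100000000000000ULL;  /* … and reduce */
    }
    return z;
}

/* Precompute H, H^2, ..., H^6 once per key, as keyhash_init does. */
static void ghash_precompute(gf128 h, gf128 powers[6])
{
    powers[0] = h;
    for (int i = 1; i < 6; i++)
        powers[i] = gf128_mul(powers[i - 1], h);
}
```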
-.global gctr128_bytes
-gctr128_bytes:
  [… elided: CTR-mode bulk encryption under AES-128. Saves the callee-saved
  GPRs and %xmm6–%xmm15, runs a four-blocks-per-iteration main loop (L2/L3)
  that encrypts consecutive counter blocks under the 11 round keys at
  0(%r8) through 160(%r8) and XORs the keystream into the input, falls
  through to a single-block loop (L4/L5) for the remaining whole blocks,
  handles a trailing partial block, and restores all saved registers before
  ret …]
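Stripped of the register shuffling, this is plain CTR mode: encrypt a counter block, XOR the keystream into the plaintext, bump the low 32 bits of the counter. A minimal sketch of the single-block tail loop's logic (the main loop does the same four blocks at a time); aes128_encrypt_block is a hypothetical stand-in for the ten-round encryption spelled out above:

```c
#include <stddef.h>
#include <stdint.h>

/* Assumed elsewhere: one full 10-round AES-128 block encryption. */
void aes128_encrypt_block(uint8_t out[16], const uint8_t in[16],
                          const uint8_t *round_keys);

/* Increment the low 32 bits of the counter block, big-endian with carry. */
static void inc32(uint8_t ctr[16])
{
    for (int i = 15; i >= 12; i--)
        if (++ctr[i] != 0) break;
}

void gctr128_sketch(const uint8_t *in, uint8_t *out, size_t nblocks,
                    uint8_t ctr[16], const uint8_t *round_keys)
{
    uint8_t keystream[16];
    for (size_t b = 0; b < nblocks; b++) {
        aes128_encrypt_block(keystream, ctr, round_keys);
        for (int i = 0; i < 16; i++)
            out[16 * b + i] = in[16 * b + i] ^ keystream[i];
        inc32(ctr);
    }
}
```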
-.global gctr256_bytes
-gctr256_bytes:
  [… elided: same structure as gctr128_bytes — four-block main loop
  (L10/L11), single-block loop (L12/L13), partial-block tail — but with the
  14-round AES-256 schedule, consuming the round keys at 0(%r8) through
  224(%r8) …]
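gctr256_bytes tracks gctr128_bytes instruction for instruction apart from the round count, so the CTR sketch after gctr128_bytes applies unchanged once aes128_encrypt_block is swapped for a 14-round AES-256 block function.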
-.global compute_iv_stdcall
-compute_iv_stdcall:
  [… elided: GCM J0 derivation. A 12-byte IV takes the fast path: load,
  byte-swap, set the low 32-bit counter word to 1, store to 0(%rcx). Any
  other length is GHASHed instead — the six-blocks-at-a-time PCLMULQDQ loop
  over the precomputed powers of H around -32(%r9), a masked partial-block
  step, and a final length block — before the 128-bit result is stored. The
  length test at entry selects between a light %rdi/%rsi save and a full
  GPR/XMM save-and-restore for the two paths …]
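Both paths implement GCM's J0 derivation from NIST SP 800-38D: a 96-bit IV is used directly with a counter of 1; anything else is hashed. A hedged C sketch, where ghash_update is a hypothetical stand-in for the PCLMULQDQ multiply, not a HACL* function:

```c
#include <stdint.h>
#include <string.h>

/* Assumed elsewhere: acc = (acc XOR block) * H in GF(2^128). */
void ghash_update(uint8_t acc[16], const uint8_t block[16],
                  const uint8_t h[16]);

void compute_iv_sketch(uint8_t j0[16], const uint8_t *iv, size_t iv_len,
                       const uint8_t h[16])
{
    if (iv_len == 12) {               /* fast path: J0 = IV || 0x00000001 */
        memcpy(j0, iv, 12);
        j0[12] = 0x00; j0[13] = 0x00; j0[14] = 0x00; j0[15] = 0x01;
        return;
    }
    uint8_t acc[16] = {0}, block[16];
    size_t i = 0;
    for (; i + 16 <= iv_len; i += 16)          /* full 16-byte IV blocks */
        ghash_update(acc, iv + i, h);
    if (i < iv_len) {                          /* zero-padded partial block */
        memset(block, 0, sizeof block);
        memcpy(block, iv + i, iv_len - i);
        ghash_update(acc, block, h);
    }
    memset(block, 0, sizeof block);            /* 0^64 || [bitlen(IV)]_64 */
    uint64_t bits = (uint64_t)iv_len * 8;
    for (int b = 0; b < 8; b++)
        block[15 - b] = (uint8_t)(bits >> (8 * b));
    ghash_update(acc, block, h);
    memcpy(j0, acc, 16);                       /* J0 = GHASH(pad(IV) || len) */
}
```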
-.global gcm128_encrypt_opt
-gcm128_encrypt_opt:
  [… elided: start of the optimized AES-128-GCM encryption. Saves the
  callee-saved GPRs and %xmm6–%xmm15, GHASHes the additional data with the
  same six-block PCLMULQDQ loop and partial-block masking, derives the first
  counter block from the IV at 0(%r8), then enters the main pipeline in
  which six counter blocks are encrypted with VAESENC while the GHASH of the
  previous six ciphertext blocks is folded in with VPCLMULQDQ, the running
  lengths being spilled to the scratch area at (%rbp) with MOVBE; the
  function body continues below …]
%xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, 
%xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - add $96, %r14 - cmp $0, %rdx - jbe L80 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L81 -L80: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L81: -.balign 16 -L77: - cmp $0, %rdx - ja L76 - movdqu 32(%rbp), %xmm7 - movdqu %xmm1, 32(%rbp) - pxor %xmm4, %xmm4 - movdqu %xmm4, 16(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, 
%xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm0 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm0 - movdqu %xmm9, -96(%rsi) - vpshufb %xmm0, %xmm9, %xmm9 - vpxor %xmm7, %xmm1, %xmm1 - movdqu %xmm10, -80(%rsi) - vpshufb %xmm0, %xmm10, %xmm10 - movdqu %xmm11, -64(%rsi) - vpshufb %xmm0, %xmm11, %xmm11 - movdqu %xmm12, -48(%rsi) - vpshufb %xmm0, %xmm12, %xmm12 - movdqu %xmm13, -32(%rsi) - vpshufb %xmm0, %xmm13, %xmm13 - movdqu %xmm14, -16(%rsi) - vpshufb %xmm0, %xmm14, %xmm14 - pxor %xmm4, %xmm4 - movdqu %xmm14, %xmm7 - movdqu %xmm4, 16(%rbp) - movdqu %xmm13, 48(%rbp) - movdqu %xmm12, 64(%rbp) - movdqu %xmm11, 80(%rbp) - movdqu %xmm10, 96(%rbp) - movdqu %xmm9, 112(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - sub $128, %rcx -L71: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 312(%rsp), %rax - mov 320(%rsp), %rdi - mov 328(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L83 -.balign 16 -L82: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 
32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L83: - cmp %rdx, %rbx - jne L82 - mov %rdi, %r11 - jmp L85 -.balign 16 -L84: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L85: - cmp $6, %rdx - jae L84 - cmp $0, %rdx - jbe L86 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L88 - vpxor %xmm0, %xmm8, %xmm0 - 
vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L89 -L88: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L90 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L92 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L94 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L95 -L94: -L95: - jmp L93 -L92: -L93: - jmp L91 -L90: -L91: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L89: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L87 -L86: -L87: - add 304(%rsp), %r14 - imul $16, %r14 - mov 344(%rsp), %r13 - cmp %r14, %r13 - jbe L96 - mov 336(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%rax), %xmm4 - pxor %xmm4, %xmm0 - movdqu %xmm0, 0(%rax) - cmp $8, %r10 - jae L98 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L99 
-L98: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L99: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L97 -L96: -L97: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 360(%rsp), %r15 - movdqu %xmm8, 0(%r15) - pop %rax - pinsrq $1, %rax, %xmm6 - pop %rax - pinsrq $0, %rax, %xmm6 - pop %rax - pinsrq $1, %rax, %xmm7 - pop %rax - pinsrq $0, %rax, %xmm7 - pop %rax - pinsrq $1, %rax, %xmm8 - pop %rax - pinsrq $0, %rax, %xmm8 - pop %rax - pinsrq $1, %rax, %xmm9 - pop %rax - pinsrq $0, %rax, %xmm9 - pop %rax - pinsrq $1, %rax, %xmm10 - pop %rax - pinsrq $0, %rax, %xmm10 - pop %rax - pinsrq $1, %rax, %xmm11 - pop %rax - pinsrq $0, %rax, %xmm11 - pop %rax - pinsrq $1, %rax, %xmm12 - pop %rax - pinsrq $0, %rax, %xmm12 - pop %rax - pinsrq $1, %rax, %xmm13 - pop %rax - pinsrq $0, %rax, %xmm13 - pop %rax - pinsrq $1, %rax, %xmm14 - pop %rax - pinsrq $0, %rax, %xmm14 - pop %rax - pinsrq $1, %rax, %xmm15 - pop %rax - pinsrq $0, %rax, %xmm15 - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global gcm256_encrypt_opt -gcm256_encrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - pextrq $0, %xmm15, %rax - push %rax - pextrq $1, %xmm15, %rax - push %rax - pextrq $0, %xmm14, %rax - push %rax - pextrq $1, %xmm14, %rax - push %rax - pextrq $0, %xmm13, %rax 
- push %rax - pextrq $1, %xmm13, %rax - push %rax - pextrq $0, %xmm12, %rax - push %rax - pextrq $1, %xmm12, %rax - push %rax - pextrq $0, %xmm11, %rax - push %rax - pextrq $1, %xmm11, %rax - push %rax - pextrq $0, %xmm10, %rax - push %rax - pextrq $1, %xmm10, %rax - push %rax - pextrq $0, %xmm9, %rax - push %rax - pextrq $1, %xmm9, %rax - push %rax - pextrq $0, %xmm8, %rax - push %rax - pextrq $1, %xmm8, %rax - push %rax - pextrq $0, %xmm7, %rax - push %rax - pextrq $1, %xmm7, %rax - push %rax - pextrq $0, %xmm6, %rax - push %rax - pextrq $1, %xmm6, %rax - push %rax - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - mov %r9, %rcx - mov 264(%rsp), %r8 - mov 272(%rsp), %r9 - mov 352(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 280(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L101 -.balign 16 -L100: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor 
%xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L101: - cmp $6, %rdx - jae L100 - cmp $0, %rdx - jbe L102 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L104 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L105 -L104: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L106 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L108 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L110 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L111 -L110: -L111: - jmp L109 -L108: -L109: - jmp L107 -L106: -L107: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L105: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L103 -L102: -L103: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L112 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L114 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L115 -L114: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L115: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, 
%xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L113 -L112: -L113: - mov 288(%rsp), %rdi - mov 296(%rsp), %rsi - mov 304(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L116 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L117 -L116: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rsi), %r14 - movdqu -128(%rcx), %xmm4 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - movdqu -112(%rcx), %xmm15 - mov %rcx, %r12 - sub $96, %r12 - vpxor %xmm4, %xmm1, %xmm9 - add $6, %rbx - cmp $256, %rbx - jae L118 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm2, %xmm11, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm2, %xmm12, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpaddd %xmm2, %xmm14, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 - jmp L119 -L118: - sub $256, %rbx - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpshufb %xmm0, %xmm14, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpshufb %xmm0, %xmm1, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 -L119: - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, 
%xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 80(%rcx), %xmm15 - movdqu 96(%rcx), %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 0(%rdi), %xmm3, %xmm4 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor 16(%rdi), %xmm3, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 32(%rdi), %xmm3, %xmm6 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 48(%rdi), %xmm3, %xmm8 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 64(%rdi), %xmm3, %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 80(%rdi), %xmm3, %xmm3 - lea 96(%rdi), %rdi - vaesenclast %xmm4, %xmm9, %xmm9 - vaesenclast %xmm5, %xmm10, %xmm10 - vaesenclast %xmm6, %xmm11, %xmm11 - vaesenclast %xmm8, %xmm12, %xmm12 - vaesenclast %xmm2, %xmm13, %xmm13 - vaesenclast %xmm3, %xmm14, %xmm14 - movdqu %xmm9, 0(%rsi) - movdqu %xmm10, 16(%rsi) - movdqu %xmm11, 32(%rsi) - movdqu %xmm12, 48(%rsi) - movdqu %xmm13, 64(%rsi) - movdqu %xmm14, 80(%rsi) - lea 96(%rsi), %rsi - vpshufb %xmm0, %xmm9, %xmm8 - vpshufb %xmm0, %xmm10, %xmm2 - movdqu %xmm8, 112(%rbp) - vpshufb %xmm0, %xmm11, %xmm4 - movdqu %xmm2, 96(%rbp) - vpshufb %xmm0, %xmm12, %xmm5 - movdqu %xmm4, 80(%rbp) - vpshufb %xmm0, %xmm13, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm14, %xmm7 - movdqu %xmm6, 48(%rbp) - movdqu -128(%rcx), %xmm4 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - movdqu -112(%rcx), %xmm15 - mov %rcx, %r12 - sub $96, %r12 - vpxor %xmm4, %xmm1, %xmm9 - add $6, %rbx - cmp $256, %rbx - jae L120 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm2, %xmm11, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm2, %xmm12, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpaddd %xmm2, %xmm14, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 - jmp L121 -L120: - sub $256, %rbx - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm4, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, 
%xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm4, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpxor %xmm4, %xmm12, %xmm12 - vpshufb %xmm0, %xmm14, %xmm14 - vpxor %xmm4, %xmm13, %xmm13 - vpshufb %xmm0, %xmm1, %xmm1 - vpxor %xmm4, %xmm14, %xmm14 -L121: - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm15 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 80(%rcx), %xmm15 - movdqu 96(%rcx), %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 0(%rdi), %xmm3, %xmm4 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor 16(%rdi), %xmm3, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 32(%rdi), %xmm3, %xmm6 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 48(%rdi), %xmm3, %xmm8 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 64(%rdi), %xmm3, %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 80(%rdi), %xmm3, %xmm3 - lea 96(%rdi), %rdi - vaesenclast %xmm4, %xmm9, %xmm9 - vaesenclast %xmm5, %xmm10, %xmm10 - vaesenclast %xmm6, %xmm11, %xmm11 - vaesenclast %xmm8, %xmm12, %xmm12 - vaesenclast %xmm2, 
%xmm13, %xmm13 - vaesenclast %xmm3, %xmm14, %xmm14 - movdqu %xmm9, 0(%rsi) - movdqu %xmm10, 16(%rsi) - movdqu %xmm11, 32(%rsi) - movdqu %xmm12, 48(%rsi) - movdqu %xmm13, 64(%rsi) - movdqu %xmm14, 80(%rsi) - lea 96(%rsi), %rsi - sub $12, %rdx - movdqu 32(%rbp), %xmm8 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - jmp L123 -.balign 16 -L122: - add $6, %rbx - cmp $256, %rbx - jb L124 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L125 -L124: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L125: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, 
%xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 80(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 96(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - 
vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - add $96, %r14 - cmp $0, %rdx - jbe L126 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L127 -L126: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L127: -.balign 16 -L123: - cmp $0, %rdx - ja L122 - movdqu 32(%rbp), %xmm7 - movdqu %xmm1, 32(%rbp) - pxor %xmm4, %xmm4 - movdqu %xmm4, 16(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, 
%xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm0 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm0 - movdqu %xmm9, -96(%rsi) - vpshufb %xmm0, %xmm9, %xmm9 - vpxor %xmm7, %xmm1, %xmm1 - movdqu %xmm10, -80(%rsi) - vpshufb %xmm0, %xmm10, %xmm10 - movdqu %xmm11, -64(%rsi) - vpshufb %xmm0, %xmm11, %xmm11 - movdqu %xmm12, -48(%rsi) - vpshufb %xmm0, %xmm12, %xmm12 - movdqu %xmm13, -32(%rsi) - vpshufb %xmm0, %xmm13, %xmm13 - movdqu %xmm14, -16(%rsi) - vpshufb %xmm0, %xmm14, %xmm14 - pxor %xmm4, %xmm4 - movdqu %xmm14, %xmm7 - movdqu %xmm4, 16(%rbp) - movdqu %xmm13, 48(%rbp) - movdqu %xmm12, 64(%rbp) - movdqu %xmm11, 80(%rbp) - movdqu %xmm10, 96(%rbp) - movdqu %xmm9, 112(%rbp) - movdqu -32(%r9), %xmm3 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - movdqu 48(%rbp), %xmm0 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - movdqu -16(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - movdqu 16(%r9), %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vpxor %xmm5, %xmm6, %xmm6 - vpxor %xmm1, %xmm6, %xmm6 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $3254779904, %rax - pinsrd $3, %eax, %xmm3 - vpxor %xmm8, %xmm7, %xmm7 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - sub $128, %rcx -L117: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 312(%rsp), %rax - mov 320(%rsp), %rdi - mov 328(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L129 
-.balign 16 -L128: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L129: - cmp %rdx, %rbx - jne L128 - mov %rdi, %r11 - jmp L131 -.balign 16 -L130: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, 
%xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L131: - cmp $6, %rdx - jae L130 - cmp $0, %rdx - jbe L132 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L134 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L135 -L134: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L136 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L138 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L140 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L141 -L140: -L141: - jmp L139 -L138: -L139: - jmp L137 -L136: -L137: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L135: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L133 -L132: -L133: - add 304(%rsp), %r14 - imul $16, %r14 - mov 344(%rsp), %r13 - cmp %r14, %r13 - jbe L142 - mov 336(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc 
%xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%rax), %xmm4 - pxor %xmm4, %xmm0 - movdqu %xmm0, 0(%rax) - cmp $8, %r10 - jae L144 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L145 -L144: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L145: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L143 -L142: -L143: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 360(%rsp), %r15 - movdqu %xmm8, 0(%r15) - pop %rax - pinsrq $1, %rax, %xmm6 - pop %rax - pinsrq $0, %rax, %xmm6 - pop %rax - pinsrq $1, %rax, %xmm7 - pop %rax - pinsrq $0, %rax, %xmm7 - pop %rax - pinsrq $1, %rax, %xmm8 - pop %rax - pinsrq $0, %rax, %xmm8 - pop %rax - pinsrq $1, %rax, %xmm9 - pop %rax - pinsrq $0, %rax, %xmm9 - pop %rax - pinsrq $1, %rax, %xmm10 - pop %rax - pinsrq $0, %rax, %xmm10 - pop %rax - pinsrq $1, %rax, 
%xmm11 - pop %rax - pinsrq $0, %rax, %xmm11 - pop %rax - pinsrq $1, %rax, %xmm12 - pop %rax - pinsrq $0, %rax, %xmm12 - pop %rax - pinsrq $1, %rax, %xmm13 - pop %rax - pinsrq $0, %rax, %xmm13 - pop %rax - pinsrq $1, %rax, %xmm14 - pop %rax - pinsrq $0, %rax, %xmm14 - pop %rax - pinsrq $1, %rax, %xmm15 - pop %rax - pinsrq $0, %rax, %xmm15 - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.global gcm128_decrypt_opt -gcm128_decrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - pextrq $0, %xmm15, %rax - push %rax - pextrq $1, %xmm15, %rax - push %rax - pextrq $0, %xmm14, %rax - push %rax - pextrq $1, %xmm14, %rax - push %rax - pextrq $0, %xmm13, %rax - push %rax - pextrq $1, %xmm13, %rax - push %rax - pextrq $0, %xmm12, %rax - push %rax - pextrq $1, %xmm12, %rax - push %rax - pextrq $0, %xmm11, %rax - push %rax - pextrq $1, %xmm11, %rax - push %rax - pextrq $0, %xmm10, %rax - push %rax - pextrq $1, %xmm10, %rax - push %rax - pextrq $0, %xmm9, %rax - push %rax - pextrq $1, %xmm9, %rax - push %rax - pextrq $0, %xmm8, %rax - push %rax - pextrq $1, %xmm8, %rax - push %rax - pextrq $0, %xmm7, %rax - push %rax - pextrq $1, %xmm7, %rax - push %rax - pextrq $0, %xmm6, %rax - push %rax - pextrq $1, %xmm6, %rax - push %rax - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - mov %r9, %rcx - mov 264(%rsp), %r8 - mov 272(%rsp), %r9 - mov 352(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 280(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L147 -.balign 16 -L146: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - 
vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L147: - cmp $6, %rdx - jae L146 - cmp $0, %rdx - jbe L148 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L150 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L151 -L150: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L152 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L154 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L156 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L157 -L156: -L157: - jmp L155 -L154: -L155: - jmp L153 -L152: -L153: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L151: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - 
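Note: the short tail that recurs throughout these hunks (pinsrd of 0xC2000000 into the top dword, vpslldq/vpsrldq, two vpalignr $8 + vpclmulqdq $16 pairs, and the final XOR chain whose last two vpxor continue just below) is the standard reflected GHASH reduction: it folds the 256-bit carry-less product hi:lo back to 128 bits modulo the GCM polynomial. A sketch in C intrinsics, assuming the middle Karatsuba terms have already been folded into hi and lo by the vpslldq $8/vpsrldq $8 pair; names are illustrative:

#include <immintrin.h>  /* build with -mpclmul */

static __m128i ghash_reduce(__m128i lo, __m128i hi)
{
    /* polynomial constant kept in the top qword, as pinsrd $3, 0xC2000000 */
    const __m128i poly = _mm_set_epi64x((long long)0xC200000000000000ULL, 0);
    __m128i t;
    t  = _mm_clmulepi64_si128(lo, poly, 0x10);            /* vpclmulqdq $16      */
    lo = _mm_xor_si128(_mm_shuffle_epi32(lo, 0x4E), t);   /* vpalignr $8 + vpxor */
    t  = _mm_clmulepi64_si128(lo, poly, 0x10);            /* second halving step */
    lo = _mm_xor_si128(_mm_shuffle_epi32(lo, 0x4E), t);
    return _mm_xor_si128(lo, hi);                         /* fold high half in   */
}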
vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L149 -L148: -L149: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L158 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L160 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L161 -L160: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L161: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L159 -L158: -L159: - mov 288(%rsp), %rdi - mov 296(%rsp), %rsi - mov 304(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L162 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L163 -L162: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rdi), %r14 - movdqu 32(%rbp), %xmm8 - movdqu 80(%rdi), %xmm7 - movdqu 64(%rdi), %xmm4 - movdqu 48(%rdi), %xmm5 - movdqu 32(%rdi), %xmm6 - vpshufb %xmm0, %xmm7, %xmm7 - movdqu 16(%rdi), %xmm2 - vpshufb %xmm0, %xmm4, %xmm4 - movdqu 0(%rdi), %xmm3 - vpshufb %xmm0, %xmm5, %xmm5 - movdqu %xmm4, 48(%rbp) - vpshufb %xmm0, %xmm6, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm2, %xmm2 - movdqu %xmm6, 80(%rbp) - vpshufb %xmm0, %xmm3, %xmm3 - movdqu %xmm2, 96(%rbp) - movdqu %xmm3, 112(%rbp) - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - cmp $6, %rdx - jne L164 - sub $96, %r14 - jmp L165 -L164: -L165: - jmp L167 -.balign 16 -L166: - add $6, %rbx - cmp $256, %rbx - jb L168 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L169 -L168: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, 
%xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L169: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - 
vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - cmp $6, %rdx - jbe L170 - add $96, %r14 - jmp L171 -L170: -L171: - cmp $0, %rdx - jbe L172 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L173 -L172: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L173: -.balign 16 -L167: - cmp $0, %rdx - ja L166 - movdqu %xmm1, 32(%rbp) - movdqu %xmm9, -96(%rsi) - movdqu %xmm10, -80(%rsi) - movdqu %xmm11, -64(%rsi) - movdqu %xmm12, -48(%rsi) - movdqu %xmm13, -32(%rsi) - movdqu %xmm14, -16(%rsi) - sub $128, %rcx -L163: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 312(%rsp), %rax - mov 320(%rsp), %rdi - mov 328(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, %xmm11 - mov %rdi, %rbx - mov %rdx, %r12 - mov %rax, %rdi - mov %rdi, %r11 - jmp L175 -.balign 16 -L174: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - 
movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L175: - cmp $6, %rdx - jae L174 - cmp $0, %rdx - jbe L176 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L178 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L179 -L178: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L180 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, 
%xmm5 - cmp $3, %rdx - je L182 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L184 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L185 -L184: -L185: - jmp L183 -L182: -L183: - jmp L181 -L180: -L181: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L179: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L177 -L176: -L177: - mov %rbx, %rdi - mov %r12, %rdx - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L187 -.balign 16 -L186: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L187: - cmp %rdx, %rbx - jne L186 - add 304(%rsp), %r14 - imul $16, %r14 - mov 344(%rsp), %r13 - cmp %r14, %r13 - jbe L188 - mov 336(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu 0(%rax), %xmm0 - movdqu %xmm0, %xmm10 - cmp $8, %r10 - jae L190 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L191 -L190: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L191: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - 
vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pxor %xmm0, %xmm10 - movdqu %xmm10, 0(%rax) - jmp L189 -L188: -L189: - mov %r15, %r11 - pxor %xmm0, %xmm0 - mov %r11, %rax - imul $8, %rax - pinsrq $1, %rax, %xmm0 - mov %r13, %rax - imul $8, %rax - pinsrq $0, %rax, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - movdqu 0(%rbp), %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - pshufb %xmm9, %xmm8 - pxor %xmm0, %xmm8 - mov 360(%rsp), %r15 - movdqu 0(%r15), %xmm0 - pcmpeqd %xmm8, %xmm0 - pextrq $0, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rax - adc $0, %rax - pextrq $1, %xmm0, %rdx - sub $18446744073709551615, %rdx - mov $0, %rdx - adc $0, %rdx - add %rdx, %rax - mov %rax, %rcx - pop %rax - pinsrq $1, %rax, %xmm6 - pop %rax - pinsrq $0, %rax, %xmm6 - pop %rax - pinsrq $1, %rax, %xmm7 - pop %rax - pinsrq $0, %rax, %xmm7 - pop %rax - pinsrq $1, %rax, %xmm8 - pop %rax - pinsrq $0, %rax, %xmm8 - pop %rax - pinsrq $1, %rax, %xmm9 - pop %rax - pinsrq $0, %rax, %xmm9 - pop %rax - pinsrq $1, %rax, %xmm10 - pop %rax - pinsrq $0, %rax, %xmm10 - pop %rax - pinsrq $1, %rax, %xmm11 - pop %rax - pinsrq $0, %rax, %xmm11 - pop %rax - pinsrq $1, %rax, %xmm12 - pop %rax - pinsrq $0, %rax, %xmm12 - pop %rax - pinsrq $1, %rax, %xmm13 - pop %rax - pinsrq $0, %rax, %xmm13 - pop %rax - pinsrq $1, %rax, %xmm14 - pop %rax - pinsrq $0, %rax, %xmm14 - pop %rax - pinsrq $1, %rax, %xmm15 - pop %rax - pinsrq $0, %rax, %xmm15 - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - mov %rcx, %rax - ret - -.global gcm256_decrypt_opt 
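Note: gcm128_decrypt_opt above ends by recomputing the tag, comparing it against the caller's expected tag with a single pcmpeqd followed by the sub $-1/adc pairs, and returning 0 only when both 64-bit halves matched, so no byte-at-a-time branching on secret data occurs. A rough scalar equivalent, with a hypothetical helper name:

#include <stdint.h>
#include <string.h>

static uint64_t tag_check(const uint8_t computed[16], const uint8_t expected[16])
{
    uint64_t c[2], e[2], r = 0;
    memcpy(c, computed, 16);
    memcpy(e, expected, 16);
    r += ((c[0] ^ e[0]) != 0);  /* qword 0: 0 if equal, 1 otherwise */
    r += ((c[1] ^ e[1]) != 0);  /* qword 1, via the second sub/adc  */
    return r;                   /* 0 => tag accepted                */
}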
-gcm256_decrypt_opt: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - pextrq $0, %xmm15, %rax - push %rax - pextrq $1, %xmm15, %rax - push %rax - pextrq $0, %xmm14, %rax - push %rax - pextrq $1, %xmm14, %rax - push %rax - pextrq $0, %xmm13, %rax - push %rax - pextrq $1, %xmm13, %rax - push %rax - pextrq $0, %xmm12, %rax - push %rax - pextrq $1, %xmm12, %rax - push %rax - pextrq $0, %xmm11, %rax - push %rax - pextrq $1, %xmm11, %rax - push %rax - pextrq $0, %xmm10, %rax - push %rax - pextrq $1, %xmm10, %rax - push %rax - pextrq $0, %xmm9, %rax - push %rax - pextrq $1, %xmm9, %rax - push %rax - pextrq $0, %xmm8, %rax - push %rax - pextrq $1, %xmm8, %rax - push %rax - pextrq $0, %xmm7, %rax - push %rax - pextrq $1, %xmm7, %rax - push %rax - pextrq $0, %xmm6, %rax - push %rax - pextrq $1, %xmm6, %rax - push %rax - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - mov %r9, %rcx - mov 264(%rsp), %r8 - mov 272(%rsp), %r9 - mov 352(%rsp), %rbp - mov %rcx, %r13 - lea 32(%r9), %r9 - mov 280(%rsp), %rbx - mov %rdx, %rcx - imul $16, %rcx - mov $579005069656919567, %r10 - pinsrq $0, %r10, %xmm9 - mov $283686952306183, %r10 - pinsrq $1, %r10, %xmm9 - pxor %xmm8, %xmm8 - mov %rdi, %r11 - jmp L193 -.balign 16 -L192: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 
- vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L193: - cmp $6, %rdx - jae L192 - cmp $0, %rdx - jbe L194 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L196 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L197 -L196: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - cmp $2, %rdx - je L198 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L200 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L202 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L203 -L202: -L203: - jmp L201 -L200: -L201: - jmp L199 -L198: -L199: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L197: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L195 -L194: -L195: - mov %rsi, %r15 - cmp %rcx, %rsi - jbe L204 - movdqu 0(%rbx), %xmm0 - mov %rsi, %r10 - and $15, %r10 - cmp $8, %r10 - jae L206 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L207 -L206: - mov %r10, %rcx - sub $8, %rcx - shl $3, %rcx - mov 
$1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $1, %xmm0, %rcx - and %r11, %rcx - pinsrq $1, %rcx, %xmm0 -L207: - pshufb %xmm9, %xmm0 - movdqu -32(%r9), %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r11 - pinsrd $3, %r11d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L205 -L204: -L205: - mov 288(%rsp), %rdi - mov 296(%rsp), %rsi - mov 304(%rsp), %rdx - mov %r13, %rcx - movdqu %xmm9, %xmm0 - movdqu 0(%r8), %xmm1 - movdqu %xmm1, 0(%rbp) - pxor %xmm10, %xmm10 - mov $1, %r11 - pinsrq $0, %r11, %xmm10 - vpaddd %xmm10, %xmm1, %xmm1 - cmp $0, %rdx - jne L208 - vpshufb %xmm0, %xmm1, %xmm1 - movdqu %xmm1, 32(%rbp) - jmp L209 -L208: - movdqu %xmm8, 32(%rbp) - add $128, %rcx - pextrq $0, %xmm1, %rbx - and $255, %rbx - vpshufb %xmm0, %xmm1, %xmm1 - lea 96(%rdi), %r14 - movdqu 32(%rbp), %xmm8 - movdqu 80(%rdi), %xmm7 - movdqu 64(%rdi), %xmm4 - movdqu 48(%rdi), %xmm5 - movdqu 32(%rdi), %xmm6 - vpshufb %xmm0, %xmm7, %xmm7 - movdqu 16(%rdi), %xmm2 - vpshufb %xmm0, %xmm4, %xmm4 - movdqu 0(%rdi), %xmm3 - vpshufb %xmm0, %xmm5, %xmm5 - movdqu %xmm4, 48(%rbp) - vpshufb %xmm0, %xmm6, %xmm6 - movdqu %xmm5, 64(%rbp) - vpshufb %xmm0, %xmm2, %xmm2 - movdqu %xmm6, 80(%rbp) - vpshufb %xmm0, %xmm3, %xmm3 - movdqu %xmm2, 96(%rbp) - movdqu %xmm3, 112(%rbp) - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vpxor %xmm4, %xmm4, %xmm4 - movdqu -128(%rcx), %xmm15 - vpaddd %xmm2, %xmm1, %xmm10 - vpaddd %xmm2, %xmm10, %xmm11 - vpaddd %xmm2, %xmm11, %xmm12 - vpaddd %xmm2, %xmm12, %xmm13 - vpaddd %xmm2, %xmm13, %xmm14 - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm4, 16(%rbp) - cmp $6, %rdx - jne L210 - sub $96, %r14 - jmp L211 -L210: -L211: - jmp L213 -.balign 16 -L212: - add $6, %rbx - cmp $256, %rbx - jb L214 - mov $579005069656919567, %r11 - pinsrq $0, %r11, %xmm0 - mov $283686952306183, %r11 - pinsrq $1, %r11, %xmm0 - vpshufb %xmm0, %xmm1, %xmm6 - pxor %xmm5, %xmm5 - mov $1, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm10 - pxor %xmm5, %xmm5 - mov $2, %r11 - pinsrq $0, %r11, %xmm5 - vpaddd %xmm5, %xmm6, %xmm11 - movdqu -32(%r9), %xmm3 - vpaddd %xmm5, %xmm10, %xmm12 - vpshufb %xmm0, %xmm10, %xmm10 - vpaddd %xmm5, %xmm11, %xmm13 - vpshufb %xmm0, %xmm11, %xmm11 - vpxor %xmm15, %xmm10, %xmm10 - vpaddd %xmm5, %xmm12, %xmm14 - vpshufb %xmm0, %xmm12, %xmm12 - vpxor %xmm15, %xmm11, %xmm11 - vpaddd %xmm5, %xmm13, %xmm1 - vpshufb %xmm0, %xmm13, %xmm13 - vpshufb %xmm0, %xmm14, %xmm14 - vpshufb %xmm0, %xmm1, %xmm1 - sub $256, %rbx - jmp L215 -L214: - movdqu -32(%r9), %xmm3 - vpaddd %xmm14, %xmm2, %xmm1 - vpxor %xmm15, %xmm10, %xmm10 - vpxor %xmm15, %xmm11, %xmm11 -L215: - movdqu %xmm1, 128(%rbp) - vpclmulqdq $16, %xmm3, %xmm7, %xmm5 - vpxor %xmm15, %xmm12, %xmm12 - movdqu -112(%rcx), %xmm2 - vpclmulqdq $1, %xmm3, %xmm7, %xmm6 - vaesenc %xmm2, %xmm9, %xmm9 - movdqu 48(%rbp), %xmm0 - vpxor %xmm15, %xmm13, %xmm13 - vpclmulqdq $0, %xmm3, %xmm7, %xmm1 - vaesenc %xmm2, %xmm10, %xmm10 - vpxor %xmm15, %xmm14, %xmm14 - vpclmulqdq $17, %xmm3, %xmm7, %xmm7 - 
vaesenc %xmm2, %xmm11, %xmm11 - movdqu -16(%r9), %xmm3 - vaesenc %xmm2, %xmm12, %xmm12 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $0, %xmm3, %xmm0, %xmm5 - vpxor %xmm4, %xmm8, %xmm8 - vaesenc %xmm2, %xmm13, %xmm13 - vpxor %xmm5, %xmm1, %xmm4 - vpclmulqdq $16, %xmm3, %xmm0, %xmm1 - vaesenc %xmm2, %xmm14, %xmm14 - movdqu -96(%rcx), %xmm15 - vpclmulqdq $1, %xmm3, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor 16(%rbp), %xmm8, %xmm8 - vpclmulqdq $17, %xmm3, %xmm0, %xmm3 - movdqu 64(%rbp), %xmm0 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 88(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 80(%r14), %r12 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 32(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 40(%rbp) - movdqu 16(%r9), %xmm5 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -80(%rcx), %xmm15 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm3, %xmm7, %xmm7 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vaesenc %xmm15, %xmm11, %xmm11 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 80(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -64(%rcx), %xmm15 - vpxor %xmm2, %xmm6, %xmm6 - vpclmulqdq $0, %xmm1, %xmm0, %xmm2 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $16, %xmm1, %xmm0, %xmm3 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 72(%r14), %r13 - vpxor %xmm5, %xmm7, %xmm7 - vpclmulqdq $1, %xmm1, %xmm0, %xmm5 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 64(%r14), %r12 - vpclmulqdq $17, %xmm1, %xmm0, %xmm1 - movdqu 96(%rbp), %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 48(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 56(%rbp) - vpxor %xmm2, %xmm4, %xmm4 - movdqu 64(%r9), %xmm2 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -48(%rcx), %xmm15 - vpxor %xmm3, %xmm6, %xmm6 - vpclmulqdq $0, %xmm2, %xmm0, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm2, %xmm0, %xmm5 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 56(%r14), %r13 - vpxor %xmm1, %xmm7, %xmm7 - vpclmulqdq $1, %xmm2, %xmm0, %xmm1 - vpxor 112(%rbp), %xmm8, %xmm8 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 48(%r14), %r12 - vpclmulqdq $17, %xmm2, %xmm0, %xmm2 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 64(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 72(%rbp) - vpxor %xmm3, %xmm4, %xmm4 - movdqu 80(%r9), %xmm3 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu -32(%rcx), %xmm15 - vpxor %xmm5, %xmm6, %xmm6 - vpclmulqdq $16, %xmm3, %xmm8, %xmm5 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm1, %xmm6, %xmm6 - vpclmulqdq $1, %xmm3, %xmm8, %xmm1 - vaesenc %xmm15, %xmm10, %xmm10 - movbeq 40(%r14), %r13 - vpxor %xmm2, %xmm7, %xmm7 - vpclmulqdq $0, %xmm3, %xmm8, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 32(%r14), %r12 - vpclmulqdq $17, %xmm3, %xmm8, %xmm8 - vaesenc %xmm15, %xmm12, %xmm12 - movq %r13, 80(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - movq %r12, 88(%rbp) - vpxor %xmm5, %xmm6, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor %xmm1, %xmm6, %xmm6 - movdqu -16(%rcx), %xmm15 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm2, %xmm4, %xmm4 - pxor %xmm3, %xmm3 - mov $13979173243358019584, %r11 - pinsrq $1, %r11, %xmm3 - vaesenc %xmm15, %xmm9, %xmm9 - vpxor %xmm8, %xmm7, %xmm7 - vaesenc %xmm15, %xmm10, %xmm10 - vpxor %xmm5, %xmm4, %xmm4 - movbeq 24(%r14), %r13 - vaesenc %xmm15, %xmm11, %xmm11 - movbeq 16(%r14), %r12 - vpalignr $8, 
%xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - movq %r13, 96(%rbp) - vaesenc %xmm15, %xmm12, %xmm12 - movq %r12, 104(%rbp) - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - movdqu 0(%rcx), %xmm1 - vaesenc %xmm1, %xmm9, %xmm9 - movdqu 16(%rcx), %xmm15 - vaesenc %xmm1, %xmm10, %xmm10 - vpsrldq $8, %xmm6, %xmm6 - vaesenc %xmm1, %xmm11, %xmm11 - vpxor %xmm6, %xmm7, %xmm7 - vaesenc %xmm1, %xmm12, %xmm12 - vpxor %xmm0, %xmm4, %xmm4 - movbeq 8(%r14), %r13 - vaesenc %xmm1, %xmm13, %xmm13 - movbeq 0(%r14), %r12 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 32(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 48(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 64(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - vaesenc %xmm15, %xmm10, %xmm10 - vaesenc %xmm15, %xmm11, %xmm11 - vaesenc %xmm15, %xmm12, %xmm12 - vaesenc %xmm15, %xmm13, %xmm13 - vaesenc %xmm15, %xmm14, %xmm14 - vaesenc %xmm1, %xmm9, %xmm9 - vaesenc %xmm1, %xmm10, %xmm10 - vaesenc %xmm1, %xmm11, %xmm11 - vaesenc %xmm1, %xmm12, %xmm12 - vaesenc %xmm1, %xmm13, %xmm13 - movdqu 80(%rcx), %xmm15 - vaesenc %xmm1, %xmm14, %xmm14 - movdqu 96(%rcx), %xmm1 - vaesenc %xmm15, %xmm9, %xmm9 - movdqu %xmm7, 16(%rbp) - vpalignr $8, %xmm4, %xmm4, %xmm8 - vaesenc %xmm15, %xmm10, %xmm10 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor 0(%rdi), %xmm1, %xmm2 - vaesenc %xmm15, %xmm11, %xmm11 - vpxor 16(%rdi), %xmm1, %xmm0 - vaesenc %xmm15, %xmm12, %xmm12 - vpxor 32(%rdi), %xmm1, %xmm5 - vaesenc %xmm15, %xmm13, %xmm13 - vpxor 48(%rdi), %xmm1, %xmm6 - vaesenc %xmm15, %xmm14, %xmm14 - vpxor 64(%rdi), %xmm1, %xmm7 - vpxor 80(%rdi), %xmm1, %xmm3 - movdqu 128(%rbp), %xmm1 - vaesenclast %xmm2, %xmm9, %xmm9 - pxor %xmm2, %xmm2 - mov $72057594037927936, %r11 - pinsrq $1, %r11, %xmm2 - vaesenclast %xmm0, %xmm10, %xmm10 - vpaddd %xmm2, %xmm1, %xmm0 - movq %r13, 112(%rbp) - lea 96(%rdi), %rdi - vaesenclast %xmm5, %xmm11, %xmm11 - vpaddd %xmm2, %xmm0, %xmm5 - movq %r12, 120(%rbp) - lea 96(%rsi), %rsi - movdqu -128(%rcx), %xmm15 - vaesenclast %xmm6, %xmm12, %xmm12 - vpaddd %xmm2, %xmm5, %xmm6 - vaesenclast %xmm7, %xmm13, %xmm13 - vpaddd %xmm2, %xmm6, %xmm7 - vaesenclast %xmm3, %xmm14, %xmm14 - vpaddd %xmm2, %xmm7, %xmm3 - sub $6, %rdx - cmp $6, %rdx - jbe L216 - add $96, %r14 - jmp L217 -L216: -L217: - cmp $0, %rdx - jbe L218 - movdqu %xmm9, -96(%rsi) - vpxor %xmm15, %xmm1, %xmm9 - movdqu %xmm10, -80(%rsi) - movdqu %xmm0, %xmm10 - movdqu %xmm11, -64(%rsi) - movdqu %xmm5, %xmm11 - movdqu %xmm12, -48(%rsi) - movdqu %xmm6, %xmm12 - movdqu %xmm13, -32(%rsi) - movdqu %xmm7, %xmm13 - movdqu %xmm14, -16(%rsi) - movdqu %xmm3, %xmm14 - movdqu 32(%rbp), %xmm7 - jmp L219 -L218: - vpxor 16(%rbp), %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 -L219: -.balign 16 -L213: - cmp $0, %rdx - ja L212 - movdqu %xmm1, 32(%rbp) - movdqu %xmm9, -96(%rsi) - movdqu %xmm10, -80(%rsi) - movdqu %xmm11, -64(%rsi) - movdqu %xmm12, -48(%rsi) - movdqu %xmm13, -32(%rsi) - movdqu %xmm14, -16(%rsi) - sub $128, %rcx -L209: - movdqu 32(%rbp), %xmm11 - mov %rcx, %r8 - mov 312(%rsp), %rax - mov 320(%rsp), %rdi - mov 328(%rsp), %rdx - mov %rdx, %r14 - mov $579005069656919567, %r12 - pinsrq $0, %r12, %xmm9 - mov $283686952306183, %r12 - pinsrq $1, %r12, %xmm9 - pshufb %xmm9, 
%xmm11 - mov %rdi, %rbx - mov %rdx, %r12 - mov %rax, %rdi - mov %rdi, %r11 - jmp L221 -.balign 16 -L220: - add $80, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 80(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - add $96, %r11 - sub $6, %rdx -.balign 16 -L221: - cmp $6, %rdx - jae L220 - cmp $0, %rdx - jbe L222 - mov %rdx, %r10 - sub $1, %r10 - imul $16, %r10 - add %r10, %r11 - movdqu -32(%r9), %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - cmp $1, %rdx - jne L224 - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu %xmm1, %xmm4 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - jmp L225 -L224: - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - movdqu %xmm1, %xmm4 - movdqu -16(%r9), %xmm1 - vpxor %xmm3, %xmm2, %xmm6 - movdqu %xmm5, %xmm7 - movdqu 
%xmm1, %xmm5 - cmp $2, %rdx - je L226 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 16(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $3, %rdx - je L228 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 32(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - cmp $4, %rdx - je L230 - sub $16, %r11 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - movdqu 0(%r11), %xmm0 - pshufb %xmm9, %xmm0 - vpxor %xmm1, %xmm4, %xmm4 - movdqu 64(%r9), %xmm1 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 - movdqu %xmm1, %xmm5 - jmp L231 -L230: -L231: - jmp L229 -L228: -L229: - jmp L227 -L226: -L227: - vpxor %xmm0, %xmm8, %xmm0 - vpclmulqdq $0, %xmm5, %xmm0, %xmm1 - vpclmulqdq $16, %xmm5, %xmm0, %xmm2 - vpclmulqdq $1, %xmm5, %xmm0, %xmm3 - vpclmulqdq $17, %xmm5, %xmm0, %xmm5 - vpxor %xmm1, %xmm4, %xmm4 - vpxor %xmm2, %xmm6, %xmm6 - vpxor %xmm3, %xmm6, %xmm6 - vpxor %xmm5, %xmm7, %xmm7 -L225: - pxor %xmm3, %xmm3 - mov $3254779904, %r10 - pinsrd $3, %r10d, %xmm3 - vpslldq $8, %xmm6, %xmm5 - vpxor %xmm5, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm0 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpsrldq $8, %xmm6, %xmm6 - vpxor %xmm6, %xmm7, %xmm7 - vpxor %xmm0, %xmm4, %xmm4 - vpalignr $8, %xmm4, %xmm4, %xmm8 - vpclmulqdq $16, %xmm3, %xmm4, %xmm4 - vpxor %xmm7, %xmm8, %xmm8 - vpxor %xmm4, %xmm8, %xmm8 - jmp L223 -L222: -L223: - mov %rbx, %rdi - mov %r12, %rdx - pxor %xmm10, %xmm10 - mov $1, %rbx - pinsrd $0, %ebx, %xmm10 - mov %rax, %r11 - mov %rdi, %r10 - mov $0, %rbx - jmp L233 -.balign 16 -L232: - movdqu %xmm11, %xmm0 - pshufb %xmm9, %xmm0 - movdqu 0(%r8), %xmm2 - pxor %xmm2, %xmm0 - movdqu 16(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 32(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 48(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 64(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 80(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 96(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 112(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 128(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 144(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 160(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 176(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 192(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 208(%r8), %xmm2 - aesenc %xmm2, %xmm0 - movdqu 224(%r8), %xmm2 - aesenclast %xmm2, %xmm0 - pxor %xmm2, %xmm2 - movdqu 0(%r11), %xmm2 - pxor %xmm0, %xmm2 - movdqu %xmm2, 0(%r10) - add $1, %rbx - add $16, %r11 - add $16, %r10 - paddd %xmm10, %xmm11 -.balign 16 -L233: - cmp %rdx, %rbx - jne L232 - add 304(%rsp), %r14 - imul $16, %r14 - mov 344(%rsp), %r13 - cmp %r14, %r13 - jbe L234 - mov 336(%rsp), %rax - mov %r13, %r10 - and $15, %r10 - movdqu 0(%rax), %xmm0 - movdqu %xmm0, %xmm10 - cmp $8, %r10 - jae L236 - mov $0, %rcx - pinsrq $1, %rcx, %xmm0 - mov %r10, %rcx - shl $3, %rcx - mov $1, %r11 - shl %cl, %r11 - sub $1, %r11 - pextrq $0, %xmm0, %rcx - and %r11, %rcx - pinsrq $0, %rcx, %xmm0 - jmp L237 -L236: - 
mov %r10, %rcx
-  sub $8, %rcx
-  shl $3, %rcx
-  mov $1, %r11
-  shl %cl, %r11
-  sub $1, %r11
-  pextrq $1, %xmm0, %rcx
-  and %r11, %rcx
-  pinsrq $1, %rcx, %xmm0
-L237:
-  pshufb %xmm9, %xmm0
-  movdqu -32(%r9), %xmm5
-  vpxor %xmm0, %xmm8, %xmm0
-  vpclmulqdq $0, %xmm5, %xmm0, %xmm1
-  vpclmulqdq $16, %xmm5, %xmm0, %xmm2
-  vpclmulqdq $1, %xmm5, %xmm0, %xmm3
-  vpclmulqdq $17, %xmm5, %xmm0, %xmm5
-  movdqu %xmm1, %xmm4
-  vpxor %xmm3, %xmm2, %xmm6
-  movdqu %xmm5, %xmm7
-  pxor %xmm3, %xmm3
-  mov $3254779904, %r11
-  pinsrd $3, %r11d, %xmm3
-  vpslldq $8, %xmm6, %xmm5
-  vpxor %xmm5, %xmm4, %xmm4
-  vpalignr $8, %xmm4, %xmm4, %xmm0
-  vpclmulqdq $16, %xmm3, %xmm4, %xmm4
-  vpsrldq $8, %xmm6, %xmm6
-  vpxor %xmm6, %xmm7, %xmm7
-  vpxor %xmm0, %xmm4, %xmm4
-  vpalignr $8, %xmm4, %xmm4, %xmm8
-  vpclmulqdq $16, %xmm3, %xmm4, %xmm4
-  vpxor %xmm7, %xmm8, %xmm8
-  vpxor %xmm4, %xmm8, %xmm8
-  movdqu %xmm11, %xmm0
-  pshufb %xmm9, %xmm0
-  movdqu 0(%r8), %xmm2
-  pxor %xmm2, %xmm0
-  movdqu 16(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 32(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 48(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 64(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 80(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 96(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 112(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 128(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 144(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 160(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 176(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 192(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 208(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 224(%r8), %xmm2
-  aesenclast %xmm2, %xmm0
-  pxor %xmm2, %xmm2
-  pxor %xmm0, %xmm10
-  movdqu %xmm10, 0(%rax)
-  jmp L235
-L234:
-L235:
-  mov %r15, %r11
-  pxor %xmm0, %xmm0
-  mov %r11, %rax
-  imul $8, %rax
-  pinsrq $1, %rax, %xmm0
-  mov %r13, %rax
-  imul $8, %rax
-  pinsrq $0, %rax, %xmm0
-  movdqu -32(%r9), %xmm5
-  vpxor %xmm0, %xmm8, %xmm0
-  vpclmulqdq $0, %xmm5, %xmm0, %xmm1
-  vpclmulqdq $16, %xmm5, %xmm0, %xmm2
-  vpclmulqdq $1, %xmm5, %xmm0, %xmm3
-  vpclmulqdq $17, %xmm5, %xmm0, %xmm5
-  movdqu %xmm1, %xmm4
-  vpxor %xmm3, %xmm2, %xmm6
-  movdqu %xmm5, %xmm7
-  pxor %xmm3, %xmm3
-  mov $3254779904, %r11
-  pinsrd $3, %r11d, %xmm3
-  vpslldq $8, %xmm6, %xmm5
-  vpxor %xmm5, %xmm4, %xmm4
-  vpalignr $8, %xmm4, %xmm4, %xmm0
-  vpclmulqdq $16, %xmm3, %xmm4, %xmm4
-  vpsrldq $8, %xmm6, %xmm6
-  vpxor %xmm6, %xmm7, %xmm7
-  vpxor %xmm0, %xmm4, %xmm4
-  vpalignr $8, %xmm4, %xmm4, %xmm8
-  vpclmulqdq $16, %xmm3, %xmm4, %xmm4
-  vpxor %xmm7, %xmm8, %xmm8
-  vpxor %xmm4, %xmm8, %xmm8
-  movdqu 0(%rbp), %xmm0
-  pshufb %xmm9, %xmm0
-  movdqu 0(%r8), %xmm2
-  pxor %xmm2, %xmm0
-  movdqu 16(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 32(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 48(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 64(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 80(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 96(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 112(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 128(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 144(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 160(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 176(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 192(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 208(%r8), %xmm2
-  aesenc %xmm2, %xmm0
-  movdqu 224(%r8), %xmm2
-  aesenclast %xmm2, %xmm0
-  pxor %xmm2, %xmm2
-  pshufb %xmm9, %xmm8
-  pxor %xmm0, %xmm8
-  mov 360(%rsp), %r15
-  movdqu 0(%r15), %xmm0
-  pcmpeqd %xmm8, %xmm0
-  pextrq $0, %xmm0, %rdx
-  sub $18446744073709551615, %rdx
-  mov $0, %rax
-  adc $0, %rax
-  pextrq $1, %xmm0, %rdx
-  sub $18446744073709551615, %rdx
-  mov $0, %rdx
-  adc $0, %rdx
-  add %rdx, %rax
-  mov %rax, %rcx
-  pop %rax
-  pinsrq $1, %rax, %xmm6
-  pop %rax
-  pinsrq $0, %rax, %xmm6
-  pop %rax
-  pinsrq $1, %rax, %xmm7
-  pop %rax
-  pinsrq $0, %rax, %xmm7
-  pop %rax
-  pinsrq $1, %rax, %xmm8
-  pop %rax
-  pinsrq $0, %rax, %xmm8
-  pop %rax
-  pinsrq $1, %rax, %xmm9
-  pop %rax
-  pinsrq $0, %rax, %xmm9
-  pop %rax
-  pinsrq $1, %rax, %xmm10
-  pop %rax
-  pinsrq $0, %rax, %xmm10
-  pop %rax
-  pinsrq $1, %rax, %xmm11
-  pop %rax
-  pinsrq $0, %rax, %xmm11
-  pop %rax
-  pinsrq $1, %rax, %xmm12
-  pop %rax
-  pinsrq $0, %rax, %xmm12
-  pop %rax
-  pinsrq $1, %rax, %xmm13
-  pop %rax
-  pinsrq $0, %rax, %xmm13
-  pop %rax
-  pinsrq $1, %rax, %xmm14
-  pop %rax
-  pinsrq $0, %rax, %xmm14
-  pop %rax
-  pinsrq $1, %rax, %xmm15
-  pop %rax
-  pinsrq $0, %rax, %xmm15
-  pop %rbx
-  pop %rbp
-  pop %rdi
-  pop %rsi
-  pop %r12
-  pop %r13
-  pop %r14
-  pop %r15
-  mov %rcx, %rax
-  ret
-
diff --git a/dist/c89-compatible/aesgcm-x86_64-msvc.asm b/dist/c89-compatible/aesgcm-x86_64-msvc.asm
deleted file mode 100644
index 958a475da8..0000000000
--- a/dist/c89-compatible/aesgcm-x86_64-msvc.asm
+++ /dev/null
@@ -1,8705 +0,0 @@
-.code
-ALIGN 16
-aes128_key_expansion proc
-  movdqu xmm1, xmmword ptr [rcx + 0]
-  movdqu xmmword ptr [rdx + 0], xmm1
-  aeskeygenassist xmm2, xmm1, 1
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 16], xmm1
-  aeskeygenassist xmm2, xmm1, 2
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 32], xmm1
-  aeskeygenassist xmm2, xmm1, 4
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 48], xmm1
-  aeskeygenassist xmm2, xmm1, 8
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 64], xmm1
-  aeskeygenassist xmm2, xmm1, 16
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 80], xmm1
-  aeskeygenassist xmm2, xmm1, 32
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 96], xmm1
-  aeskeygenassist xmm2, xmm1, 64
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 112], xmm1
-  aeskeygenassist xmm2, xmm1, 128
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 128], xmm1
-  aeskeygenassist xmm2, xmm1, 27
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 144], xmm1
-  aeskeygenassist xmm2, xmm1, 54
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm3, xmm1, 4
-  pxor xmm1, xmm3
- vpslldq xmm3, xmm1, 4 - pxor xmm1, xmm3 - vpslldq xmm3, xmm1, 4 - pxor xmm1, xmm3 - pxor xmm1, xmm2 - movdqu xmmword ptr [rdx + 160], xmm1 - pxor xmm1, xmm1 - pxor xmm2, xmm2 - pxor xmm3, xmm3 - ret -aes128_key_expansion endp -ALIGN 16 -aes128_keyhash_init proc - mov r8, 579005069656919567 - pinsrq xmm4, r8, 0 - mov r8, 283686952306183 - pinsrq xmm4, r8, 1 - pxor xmm0, xmm0 - movdqu xmmword ptr [rdx + 80], xmm0 - mov r8, rcx - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pshufb xmm0, xmm4 - mov rcx, rdx - movdqu xmmword ptr [rcx + 32], xmm0 - movdqu xmm0, xmm6 - mov rax, r12 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 0], xmm1 - movdqu xmm1, xmm6 - movdqu xmm2, xmm6 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor 
xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 16], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 48], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, 
xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 64], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - 
movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 96], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - 
vpslldq xmm2, xmm2, 4
-  pxor xmm1, xmm2
-  pxor xmm1, xmm5
-  pxor xmm1, xmm6
-  movdqu xmm6, xmm1
-  movdqu xmm3, xmm1
-  pxor xmm4, xmm4
-  pxor xmm5, xmm5
-  mov r12, 3254779904
-  pinsrd xmm4, r12d, 3
-  mov r12, 1
-  pinsrd xmm4, r12d, 0
-  mov r12, 2147483648
-  pinsrd xmm5, r12d, 3
-  movdqu xmm1, xmm3
-  movdqu xmm2, xmm1
-  psrld xmm2, 31
-  pslld xmm1, 1
-  vpslldq xmm2, xmm2, 4
-  pxor xmm1, xmm2
-  pand xmm3, xmm5
-  pcmpeqd xmm3, xmm5
-  pshufd xmm3, xmm3, 255
-  pand xmm3, xmm4
-  vpxor xmm1, xmm1, xmm3
-  movdqu xmmword ptr [rcx + 112], xmm1
-  movdqu xmm6, xmm0
-  mov r12, rax
-  ret
-aes128_keyhash_init endp
-ALIGN 16
-aes256_key_expansion proc
-  movdqu xmm1, xmmword ptr [rcx + 0]
-  movdqu xmm3, xmmword ptr [rcx + 16]
-  movdqu xmmword ptr [rdx + 0], xmm1
-  movdqu xmmword ptr [rdx + 16], xmm3
-  aeskeygenassist xmm2, xmm3, 1
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 32], xmm1
-  aeskeygenassist xmm2, xmm1, 0
-  pshufd xmm2, xmm2, 170
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  pxor xmm3, xmm2
-  movdqu xmmword ptr [rdx + 48], xmm3
-  aeskeygenassist xmm2, xmm3, 2
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 64], xmm1
-  aeskeygenassist xmm2, xmm1, 0
-  pshufd xmm2, xmm2, 170
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  pxor xmm3, xmm2
-  movdqu xmmword ptr [rdx + 80], xmm3
-  aeskeygenassist xmm2, xmm3, 4
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 96], xmm1
-  aeskeygenassist xmm2, xmm1, 0
-  pshufd xmm2, xmm2, 170
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  pxor xmm3, xmm2
-  movdqu xmmword ptr [rdx + 112], xmm3
-  aeskeygenassist xmm2, xmm3, 8
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 128], xmm1
-  aeskeygenassist xmm2, xmm1, 0
-  pshufd xmm2, xmm2, 170
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  pxor xmm3, xmm2
-  movdqu xmmword ptr [rdx + 144], xmm3
-  aeskeygenassist xmm2, xmm3, 16
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 160], xmm1
-  aeskeygenassist xmm2, xmm1, 0
-  pshufd xmm2, xmm2, 170
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  pxor xmm3, xmm2
-  movdqu xmmword ptr [rdx + 176], xmm3
-  aeskeygenassist xmm2, xmm3, 32
-  pshufd xmm2, xmm2, 255
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  vpslldq xmm4, xmm1, 4
-  pxor xmm1, xmm4
-  pxor xmm1, xmm2
-  movdqu xmmword ptr [rdx + 192], xmm1
-  aeskeygenassist xmm2, xmm1, 0
-  pshufd xmm2, xmm2, 170
-  vpslldq xmm4, xmm3, 4
-  pxor xmm3, xmm4
-  vpslldq xmm4, xmm3, 4
pxor xmm3, xmm4 - vpslldq xmm4, xmm3, 4 - pxor xmm3, xmm4 - pxor xmm3, xmm2 - movdqu xmmword ptr [rdx + 208], xmm3 - aeskeygenassist xmm2, xmm3, 64 - pshufd xmm2, xmm2, 255 - vpslldq xmm4, xmm1, 4 - pxor xmm1, xmm4 - vpslldq xmm4, xmm1, 4 - pxor xmm1, xmm4 - vpslldq xmm4, xmm1, 4 - pxor xmm1, xmm4 - pxor xmm1, xmm2 - movdqu xmmword ptr [rdx + 224], xmm1 - pxor xmm1, xmm1 - pxor xmm2, xmm2 - pxor xmm3, xmm3 - pxor xmm4, xmm4 - ret -aes256_key_expansion endp -ALIGN 16 -aes256_keyhash_init proc - mov r8, 579005069656919567 - pinsrq xmm4, r8, 0 - mov r8, 283686952306183 - pinsrq xmm4, r8, 1 - pxor xmm0, xmm0 - movdqu xmmword ptr [rdx + 80], xmm0 - mov r8, rcx - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pshufb xmm0, xmm4 - mov rcx, rdx - movdqu xmmword ptr [rcx + 32], xmm0 - movdqu xmm0, xmm6 - mov rax, r12 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 0], xmm1 - movdqu xmm1, xmm6 - movdqu xmm2, xmm6 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, 
r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 16], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - 
movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 48], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 64], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - 
movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 96], xmm1 - movdqu xmm2, xmm6 - movdqu xmm1, xmmword ptr [rcx + 32] - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm6, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - movdqu xmm5, xmm1 - pclmulqdq xmm1, xmm2, 16 - movdqu xmm3, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 1 - movdqu xmm4, xmm1 - movdqu xmm1, xmm5 - pclmulqdq xmm1, xmm2, 0 - pclmulqdq xmm5, xmm2, 17 - movdqu xmm2, xmm5 - movdqu xmm5, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm4 - mov r12, 0 - pinsrd xmm1, r12d, 0 - pshufd xmm1, xmm1, 14 - pxor xmm2, xmm1 - movdqu xmm1, xmm3 - mov r12, 0 - pinsrd xmm1, r12d, 3 - pshufd xmm1, xmm1, 79 - mov r12, 0 - pinsrd xmm4, r12d, 3 - pshufd xmm4, xmm4, 79 - pxor 
xmm1, xmm4 - pxor xmm1, xmm5 - movdqu xmm3, xmm1 - psrld xmm3, 31 - movdqu xmm4, xmm2 - psrld xmm4, 31 - pslld xmm1, 1 - pslld xmm2, 1 - vpslldq xmm5, xmm3, 4 - vpslldq xmm4, xmm4, 4 - mov r12, 0 - pinsrd xmm3, r12d, 0 - pshufd xmm3, xmm3, 3 - pxor xmm3, xmm4 - pxor xmm1, xmm5 - pxor xmm2, xmm3 - movdqu xmm5, xmm2 - pxor xmm2, xmm2 - mov r12, 3774873600 - pinsrd xmm2, r12d, 3 - pclmulqdq xmm1, xmm2, 17 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pxor xmm1, xmm5 - pxor xmm1, xmm6 - movdqu xmm6, xmm1 - movdqu xmm3, xmm1 - pxor xmm4, xmm4 - pxor xmm5, xmm5 - mov r12, 3254779904 - pinsrd xmm4, r12d, 3 - mov r12, 1 - pinsrd xmm4, r12d, 0 - mov r12, 2147483648 - pinsrd xmm5, r12d, 3 - movdqu xmm1, xmm3 - movdqu xmm2, xmm1 - psrld xmm2, 31 - pslld xmm1, 1 - vpslldq xmm2, xmm2, 4 - pxor xmm1, xmm2 - pand xmm3, xmm5 - pcmpeqd xmm3, xmm5 - pshufd xmm3, xmm3, 255 - pand xmm3, xmm4 - vpxor xmm1, xmm1, xmm3 - movdqu xmmword ptr [rcx + 112], xmm1 - movdqu xmm6, xmm0 - mov r12, rax - ret -aes256_keyhash_init endp -ALIGN 16 -gctr128_bytes proc - push r15 - push r14 - push r13 - push r12 - push rsi - push rdi - push rbp - push rbx - pextrq rax, xmm15, 0 - push rax - pextrq rax, xmm15, 1 - push rax - pextrq rax, xmm14, 0 - push rax - pextrq rax, xmm14, 1 - push rax - pextrq rax, xmm13, 0 - push rax - pextrq rax, xmm13, 1 - push rax - pextrq rax, xmm12, 0 - push rax - pextrq rax, xmm12, 1 - push rax - pextrq rax, xmm11, 0 - push rax - pextrq rax, xmm11, 1 - push rax - pextrq rax, xmm10, 0 - push rax - pextrq rax, xmm10, 1 - push rax - pextrq rax, xmm9, 0 - push rax - pextrq rax, xmm9, 1 - push rax - pextrq rax, xmm8, 0 - push rax - pextrq rax, xmm8, 1 - push rax - pextrq rax, xmm7, 0 - push rax - pextrq rax, xmm7, 1 - push rax - pextrq rax, xmm6, 0 - push rax - pextrq rax, xmm6, 1 - push rax - mov rax, qword ptr [rsp + 272] - movdqu xmm7, xmmword ptr [rax + 0] - mov rax, rcx - mov rbx, r8 - mov rsi, rdx - mov r13, r9 - mov r8, qword ptr [rsp + 264] - mov rcx, qword ptr [rsp + 280] - mov rbp, rcx - imul rbp, 16 - mov r12, 579005069656919567 - pinsrq xmm8, r12, 0 - mov r12, 283686952306183 - pinsrq xmm8, r12, 1 - mov rdx, rcx - shr rdx, 2 - and rcx, 3 - cmp rdx, 0 - jbe L0 - mov r9, rax - mov r10, rbx - pshufb xmm7, xmm8 - movdqu xmm9, xmm7 - mov rax, 579005069656919567 - pinsrq xmm0, rax, 0 - mov rax, 579005069656919567 - pinsrq xmm0, rax, 1 - pshufb xmm9, xmm0 - movdqu xmm10, xmm9 - pxor xmm3, xmm3 - mov rax, 1 - pinsrd xmm3, eax, 2 - paddd xmm9, xmm3 - mov rax, 3 - pinsrd xmm3, eax, 2 - mov rax, 2 - pinsrd xmm3, eax, 0 - paddd xmm10, xmm3 - pshufb xmm9, xmm8 - pshufb xmm10, xmm8 - pextrq rdi, xmm7, 0 - mov rax, 283686952306183 - pinsrq xmm0, rax, 0 - mov rax, 579005069656919567 - pinsrq xmm0, rax, 1 - pxor xmm15, xmm15 - mov rax, 4 - pinsrd xmm15, eax, 0 - mov rax, 4 - pinsrd xmm15, eax, 2 - jmp L3 -ALIGN 16 -L2: - pinsrq xmm2, rdi, 0 - pinsrq xmm12, rdi, 0 - pinsrq xmm13, rdi, 0 - pinsrq xmm14, rdi, 0 - shufpd xmm2, xmm9, 2 - shufpd xmm12, xmm9, 0 - shufpd xmm13, xmm10, 2 - shufpd xmm14, xmm10, 0 - pshufb xmm9, xmm0 - pshufb xmm10, xmm0 - movdqu xmm3, xmmword ptr [r8 + 0] - movdqu xmm4, xmmword ptr [r8 + 16] - movdqu xmm5, xmmword ptr [r8 + 32] - movdqu xmm6, xmmword ptr [r8 + 48] - paddd xmm9, xmm15 - paddd xmm10, xmm15 - pxor xmm2, xmm3 - pxor xmm12, xmm3 - pxor xmm13, xmm3 - pxor xmm14, xmm3 - pshufb xmm9, xmm0 - pshufb xmm10, xmm0 - aesenc xmm2, xmm4 - aesenc xmm12, xmm4 - aesenc xmm13, xmm4 - aesenc xmm14, xmm4 - aesenc xmm2, xmm5 - aesenc xmm12, 
xmm5 - aesenc xmm13, xmm5 - aesenc xmm14, xmm5 - aesenc xmm2, xmm6 - aesenc xmm12, xmm6 - aesenc xmm13, xmm6 - aesenc xmm14, xmm6 - movdqu xmm3, xmmword ptr [r8 + 64] - movdqu xmm4, xmmword ptr [r8 + 80] - movdqu xmm5, xmmword ptr [r8 + 96] - movdqu xmm6, xmmword ptr [r8 + 112] - aesenc xmm2, xmm3 - aesenc xmm12, xmm3 - aesenc xmm13, xmm3 - aesenc xmm14, xmm3 - aesenc xmm2, xmm4 - aesenc xmm12, xmm4 - aesenc xmm13, xmm4 - aesenc xmm14, xmm4 - aesenc xmm2, xmm5 - aesenc xmm12, xmm5 - aesenc xmm13, xmm5 - aesenc xmm14, xmm5 - aesenc xmm2, xmm6 - aesenc xmm12, xmm6 - aesenc xmm13, xmm6 - aesenc xmm14, xmm6 - movdqu xmm3, xmmword ptr [r8 + 128] - movdqu xmm4, xmmword ptr [r8 + 144] - movdqu xmm5, xmmword ptr [r8 + 160] - aesenc xmm2, xmm3 - aesenc xmm12, xmm3 - aesenc xmm13, xmm3 - aesenc xmm14, xmm3 - aesenc xmm2, xmm4 - aesenc xmm12, xmm4 - aesenc xmm13, xmm4 - aesenc xmm14, xmm4 - aesenclast xmm2, xmm5 - aesenclast xmm12, xmm5 - aesenclast xmm13, xmm5 - aesenclast xmm14, xmm5 - movdqu xmm7, xmmword ptr [r9 + 0] - pxor xmm2, xmm7 - movdqu xmm7, xmmword ptr [r9 + 16] - pxor xmm12, xmm7 - movdqu xmm7, xmmword ptr [r9 + 32] - pxor xmm13, xmm7 - movdqu xmm7, xmmword ptr [r9 + 48] - pxor xmm14, xmm7 - movdqu xmmword ptr [r10 + 0], xmm2 - movdqu xmmword ptr [r10 + 16], xmm12 - movdqu xmmword ptr [r10 + 32], xmm13 - movdqu xmmword ptr [r10 + 48], xmm14 - sub rdx, 1 - add r9, 64 - add r10, 64 -ALIGN 16 -L3: - cmp rdx, 0 - ja L2 - movdqu xmm7, xmm9 - pinsrq xmm7, rdi, 0 - pshufb xmm7, xmm8 - mov rax, r9 - mov rbx, r10 - jmp L1 -L0: -L1: - mov rdx, 0 - mov r9, rax - mov r10, rbx - pxor xmm4, xmm4 - mov r12, 1 - pinsrd xmm4, r12d, 0 - jmp L5 -ALIGN 16 -L4: - movdqu xmm0, xmm7 - pshufb xmm0, xmm8 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm2, xmmword ptr [r9 + 0] - pxor xmm2, xmm0 - movdqu xmmword ptr [r10 + 0], xmm2 - add rdx, 1 - add r9, 16 - add r10, 16 - paddd xmm7, xmm4 -ALIGN 16 -L5: - cmp rdx, rcx - jne L4 - cmp rsi, rbp - jbe L6 - movdqu xmm1, xmmword ptr [r13 + 0] - movdqu xmm0, xmm7 - mov r12, 579005069656919567 - pinsrq xmm2, r12, 0 - mov r12, 283686952306183 - pinsrq xmm2, r12, 1 - pshufb xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pxor xmm1, xmm0 - movdqu xmmword ptr [r13 + 0], xmm1 - jmp L7 -L6: -L7: - pop rax - pinsrq xmm6, rax, 1 - pop rax - pinsrq xmm6, rax, 0 - pop rax - 
pinsrq xmm7, rax, 1 - pop rax - pinsrq xmm7, rax, 0 - pop rax - pinsrq xmm8, rax, 1 - pop rax - pinsrq xmm8, rax, 0 - pop rax - pinsrq xmm9, rax, 1 - pop rax - pinsrq xmm9, rax, 0 - pop rax - pinsrq xmm10, rax, 1 - pop rax - pinsrq xmm10, rax, 0 - pop rax - pinsrq xmm11, rax, 1 - pop rax - pinsrq xmm11, rax, 0 - pop rax - pinsrq xmm12, rax, 1 - pop rax - pinsrq xmm12, rax, 0 - pop rax - pinsrq xmm13, rax, 1 - pop rax - pinsrq xmm13, rax, 0 - pop rax - pinsrq xmm14, rax, 1 - pop rax - pinsrq xmm14, rax, 0 - pop rax - pinsrq xmm15, rax, 1 - pop rax - pinsrq xmm15, rax, 0 - pop rbx - pop rbp - pop rdi - pop rsi - pop r12 - pop r13 - pop r14 - pop r15 - ret -gctr128_bytes endp -ALIGN 16 -gctr256_bytes proc - push r15 - push r14 - push r13 - push r12 - push rsi - push rdi - push rbp - push rbx - pextrq rax, xmm15, 0 - push rax - pextrq rax, xmm15, 1 - push rax - pextrq rax, xmm14, 0 - push rax - pextrq rax, xmm14, 1 - push rax - pextrq rax, xmm13, 0 - push rax - pextrq rax, xmm13, 1 - push rax - pextrq rax, xmm12, 0 - push rax - pextrq rax, xmm12, 1 - push rax - pextrq rax, xmm11, 0 - push rax - pextrq rax, xmm11, 1 - push rax - pextrq rax, xmm10, 0 - push rax - pextrq rax, xmm10, 1 - push rax - pextrq rax, xmm9, 0 - push rax - pextrq rax, xmm9, 1 - push rax - pextrq rax, xmm8, 0 - push rax - pextrq rax, xmm8, 1 - push rax - pextrq rax, xmm7, 0 - push rax - pextrq rax, xmm7, 1 - push rax - pextrq rax, xmm6, 0 - push rax - pextrq rax, xmm6, 1 - push rax - mov rax, qword ptr [rsp + 272] - movdqu xmm7, xmmword ptr [rax + 0] - mov rax, rcx - mov rbx, r8 - mov rsi, rdx - mov r13, r9 - mov r8, qword ptr [rsp + 264] - mov rcx, qword ptr [rsp + 280] - mov rbp, rcx - imul rbp, 16 - mov r12, 579005069656919567 - pinsrq xmm8, r12, 0 - mov r12, 283686952306183 - pinsrq xmm8, r12, 1 - mov rdx, rcx - shr rdx, 2 - and rcx, 3 - cmp rdx, 0 - jbe L8 - mov r9, rax - mov r10, rbx - pshufb xmm7, xmm8 - movdqu xmm9, xmm7 - mov rax, 579005069656919567 - pinsrq xmm0, rax, 0 - mov rax, 579005069656919567 - pinsrq xmm0, rax, 1 - pshufb xmm9, xmm0 - movdqu xmm10, xmm9 - pxor xmm3, xmm3 - mov rax, 1 - pinsrd xmm3, eax, 2 - paddd xmm9, xmm3 - mov rax, 3 - pinsrd xmm3, eax, 2 - mov rax, 2 - pinsrd xmm3, eax, 0 - paddd xmm10, xmm3 - pshufb xmm9, xmm8 - pshufb xmm10, xmm8 - pextrq rdi, xmm7, 0 - mov rax, 283686952306183 - pinsrq xmm0, rax, 0 - mov rax, 579005069656919567 - pinsrq xmm0, rax, 1 - pxor xmm15, xmm15 - mov rax, 4 - pinsrd xmm15, eax, 0 - mov rax, 4 - pinsrd xmm15, eax, 2 - jmp L11 -ALIGN 16 -L10: - pinsrq xmm2, rdi, 0 - pinsrq xmm12, rdi, 0 - pinsrq xmm13, rdi, 0 - pinsrq xmm14, rdi, 0 - shufpd xmm2, xmm9, 2 - shufpd xmm12, xmm9, 0 - shufpd xmm13, xmm10, 2 - shufpd xmm14, xmm10, 0 - pshufb xmm9, xmm0 - pshufb xmm10, xmm0 - movdqu xmm3, xmmword ptr [r8 + 0] - movdqu xmm4, xmmword ptr [r8 + 16] - movdqu xmm5, xmmword ptr [r8 + 32] - movdqu xmm6, xmmword ptr [r8 + 48] - paddd xmm9, xmm15 - paddd xmm10, xmm15 - pxor xmm2, xmm3 - pxor xmm12, xmm3 - pxor xmm13, xmm3 - pxor xmm14, xmm3 - pshufb xmm9, xmm0 - pshufb xmm10, xmm0 - aesenc xmm2, xmm4 - aesenc xmm12, xmm4 - aesenc xmm13, xmm4 - aesenc xmm14, xmm4 - aesenc xmm2, xmm5 - aesenc xmm12, xmm5 - aesenc xmm13, xmm5 - aesenc xmm14, xmm5 - aesenc xmm2, xmm6 - aesenc xmm12, xmm6 - aesenc xmm13, xmm6 - aesenc xmm14, xmm6 - movdqu xmm3, xmmword ptr [r8 + 64] - movdqu xmm4, xmmword ptr [r8 + 80] - movdqu xmm5, xmmword ptr [r8 + 96] - movdqu xmm6, xmmword ptr [r8 + 112] - aesenc xmm2, xmm3 - aesenc xmm12, xmm3 - aesenc xmm13, xmm3 - aesenc xmm14, xmm3 - aesenc xmm2, xmm4 - 
aesenc xmm12, xmm4 - aesenc xmm13, xmm4 - aesenc xmm14, xmm4 - aesenc xmm2, xmm5 - aesenc xmm12, xmm5 - aesenc xmm13, xmm5 - aesenc xmm14, xmm5 - aesenc xmm2, xmm6 - aesenc xmm12, xmm6 - aesenc xmm13, xmm6 - aesenc xmm14, xmm6 - movdqu xmm3, xmmword ptr [r8 + 128] - movdqu xmm4, xmmword ptr [r8 + 144] - movdqu xmm5, xmmword ptr [r8 + 160] - aesenc xmm2, xmm3 - aesenc xmm12, xmm3 - aesenc xmm13, xmm3 - aesenc xmm14, xmm3 - aesenc xmm2, xmm4 - aesenc xmm12, xmm4 - aesenc xmm13, xmm4 - aesenc xmm14, xmm4 - movdqu xmm3, xmm5 - movdqu xmm4, xmmword ptr [r8 + 176] - movdqu xmm5, xmmword ptr [r8 + 192] - movdqu xmm6, xmmword ptr [r8 + 208] - aesenc xmm2, xmm3 - aesenc xmm12, xmm3 - aesenc xmm13, xmm3 - aesenc xmm14, xmm3 - aesenc xmm2, xmm4 - aesenc xmm12, xmm4 - aesenc xmm13, xmm4 - aesenc xmm14, xmm4 - aesenc xmm2, xmm5 - aesenc xmm12, xmm5 - aesenc xmm13, xmm5 - aesenc xmm14, xmm5 - aesenc xmm2, xmm6 - aesenc xmm12, xmm6 - aesenc xmm13, xmm6 - aesenc xmm14, xmm6 - movdqu xmm5, xmmword ptr [r8 + 224] - aesenclast xmm2, xmm5 - aesenclast xmm12, xmm5 - aesenclast xmm13, xmm5 - aesenclast xmm14, xmm5 - movdqu xmm7, xmmword ptr [r9 + 0] - pxor xmm2, xmm7 - movdqu xmm7, xmmword ptr [r9 + 16] - pxor xmm12, xmm7 - movdqu xmm7, xmmword ptr [r9 + 32] - pxor xmm13, xmm7 - movdqu xmm7, xmmword ptr [r9 + 48] - pxor xmm14, xmm7 - movdqu xmmword ptr [r10 + 0], xmm2 - movdqu xmmword ptr [r10 + 16], xmm12 - movdqu xmmword ptr [r10 + 32], xmm13 - movdqu xmmword ptr [r10 + 48], xmm14 - sub rdx, 1 - add r9, 64 - add r10, 64 -ALIGN 16 -L11: - cmp rdx, 0 - ja L10 - movdqu xmm7, xmm9 - pinsrq xmm7, rdi, 0 - pshufb xmm7, xmm8 - mov rax, r9 - mov rbx, r10 - jmp L9 -L8: -L9: - mov rdx, 0 - mov r9, rax - mov r10, rbx - pxor xmm4, xmm4 - mov r12, 1 - pinsrd xmm4, r12d, 0 - jmp L13 -ALIGN 16 -L12: - movdqu xmm0, xmm7 - pshufb xmm0, xmm8 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm2, xmmword ptr [r9 + 0] - pxor xmm2, xmm0 - movdqu xmmword ptr [r10 + 0], xmm2 - add rdx, 1 - add r9, 16 - add r10, 16 - paddd xmm7, xmm4 -ALIGN 16 -L13: - cmp rdx, rcx - jne L12 - cmp rsi, rbp - jbe L14 - movdqu xmm1, xmmword ptr [r13 + 0] - movdqu xmm0, xmm7 - mov r12, 579005069656919567 - pinsrq xmm2, r12, 0 - mov r12, 283686952306183 - pinsrq xmm2, r12, 1 - pshufb xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc 
xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pxor xmm1, xmm0 - movdqu xmmword ptr [r13 + 0], xmm1 - jmp L15 -L14: -L15: - pop rax - pinsrq xmm6, rax, 1 - pop rax - pinsrq xmm6, rax, 0 - pop rax - pinsrq xmm7, rax, 1 - pop rax - pinsrq xmm7, rax, 0 - pop rax - pinsrq xmm8, rax, 1 - pop rax - pinsrq xmm8, rax, 0 - pop rax - pinsrq xmm9, rax, 1 - pop rax - pinsrq xmm9, rax, 0 - pop rax - pinsrq xmm10, rax, 1 - pop rax - pinsrq xmm10, rax, 0 - pop rax - pinsrq xmm11, rax, 1 - pop rax - pinsrq xmm11, rax, 0 - pop rax - pinsrq xmm12, rax, 1 - pop rax - pinsrq xmm12, rax, 0 - pop rax - pinsrq xmm13, rax, 1 - pop rax - pinsrq xmm13, rax, 0 - pop rax - pinsrq xmm14, rax, 1 - pop rax - pinsrq xmm14, rax, 0 - pop rax - pinsrq xmm15, rax, 1 - pop rax - pinsrq xmm15, rax, 0 - pop rbx - pop rbp - pop rdi - pop rsi - pop r12 - pop r13 - pop r14 - pop r15 - ret -gctr256_bytes endp -ALIGN 16 -compute_iv_stdcall proc - cmp rdx, 12 - jne L16 - push rdi - push rsi - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - mov rcx, r9 - mov r8, qword ptr [rsp + 56] - mov r9, qword ptr [rsp + 64] - cmp rsi, 12 - jne L18 - movdqu xmm0, xmmword ptr [r8 + 0] - mov rax, 579005069656919567 - pinsrq xmm1, rax, 0 - mov rax, 283686952306183 - pinsrq xmm1, rax, 1 - pshufb xmm0, xmm1 - mov rax, 1 - pinsrd xmm0, eax, 0 - movdqu xmmword ptr [rcx + 0], xmm0 - jmp L19 -L18: - mov rax, rcx - add r9, 32 - mov rbx, r8 - mov rcx, rdx - imul rcx, 16 - mov r10, 579005069656919567 - pinsrq xmm9, r10, 0 - mov r10, 283686952306183 - pinsrq xmm9, r10, 1 - pxor xmm8, xmm8 - mov r11, rdi - jmp L21 -ALIGN 16 -L20: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - 
vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L21: - cmp rdx, 6 - jae L20 - cmp rdx, 0 - jbe L22 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L24 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L25 -L24: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L26 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L28 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L30 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L31 -L30: -L31: - jmp L29 -L28: -L29: - jmp L27 -L26: -L27: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L25: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, 
xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L23 -L22: -L23: - mov r15, rsi - cmp rsi, rcx - jbe L32 - movdqu xmm0, xmmword ptr [rbx + 0] - mov r10, rsi - and r10, 15 - cmp r10, 8 - jae L34 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L35 -L34: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L35: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L33 -L32: -L33: - mov rcx, rax - mov r11, 0 - mov r13, rsi - pxor xmm0, xmm0 - mov rax, r11 - imul rax, 8 - pinsrq xmm0, rax, 1 - mov rax, r13 - imul rax, 8 - pinsrq xmm0, rax, 0 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmmword ptr [rcx + 0], xmm8 -L19: - pop rsi - pop rdi - jmp L17 -L16: - push r15 - push r14 - push r13 - push r12 - push rsi - push rdi - push rbp - push rbx - pextrq rax, xmm15, 0 - push rax - pextrq rax, xmm15, 1 - push rax - pextrq rax, xmm14, 0 - push rax - pextrq rax, xmm14, 1 - push rax - pextrq rax, xmm13, 0 - push rax - pextrq rax, xmm13, 1 - push rax - pextrq rax, xmm12, 0 - push rax - pextrq rax, xmm12, 1 - push rax - pextrq rax, xmm11, 0 - push rax - pextrq rax, xmm11, 1 - push rax - pextrq rax, xmm10, 0 - push rax - pextrq rax, xmm10, 1 - push rax - pextrq rax, xmm9, 0 - push rax - pextrq rax, xmm9, 1 - push rax - pextrq rax, xmm8, 0 - push rax - pextrq rax, xmm8, 1 - push rax - pextrq rax, xmm7, 0 - push rax - pextrq rax, xmm7, 1 - push rax - pextrq rax, xmm6, 0 - push rax - pextrq rax, xmm6, 1 - push rax - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - mov rcx, r9 - mov r8, qword ptr [rsp + 264] - mov r9, qword ptr [rsp + 272] - cmp rsi, 12 - jne L36 - movdqu xmm0, xmmword ptr [r8 + 0] - mov rax, 579005069656919567 - pinsrq xmm1, rax, 0 - mov rax, 283686952306183 - pinsrq xmm1, rax, 1 - pshufb xmm0, xmm1 - mov rax, 1 - pinsrd xmm0, eax, 0 - movdqu xmmword ptr [rcx + 0], xmm0 - jmp L37 -L36: - mov rax, rcx - add r9, 32 - mov rbx, r8 - mov rcx, rdx - imul rcx, 16 - mov r10, 579005069656919567 - pinsrq xmm9, r10, 0 - mov r10, 283686952306183 - pinsrq xmm9, r10, 1 - pxor xmm8, xmm8 - mov r11, rdi - jmp L39 -ALIGN 16 -L38: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - 
pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L39: - cmp rdx, 6 - jae L38 - cmp rdx, 0 - jbe L40 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L42 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L43 -L42: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L44 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, 
xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L46 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L48 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L49 -L48: -L49: - jmp L47 -L46: -L47: - jmp L45 -L44: -L45: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L43: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L41 -L40: -L41: - mov r15, rsi - cmp rsi, rcx - jbe L50 - movdqu xmm0, xmmword ptr [rbx + 0] - mov r10, rsi - and r10, 15 - cmp r10, 8 - jae L52 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L53 -L52: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L53: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L51 -L50: -L51: - mov rcx, rax - mov r11, 0 - mov r13, rsi - pxor xmm0, xmm0 - mov rax, r11 - imul rax, 8 - pinsrq xmm0, rax, 1 - mov rax, r13 - imul rax, 8 - pinsrq xmm0, rax, 0 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmmword ptr [rcx + 0], xmm8 -L37: - pop rax - pinsrq xmm6, rax, 
1 - pop rax - pinsrq xmm6, rax, 0 - pop rax - pinsrq xmm7, rax, 1 - pop rax - pinsrq xmm7, rax, 0 - pop rax - pinsrq xmm8, rax, 1 - pop rax - pinsrq xmm8, rax, 0 - pop rax - pinsrq xmm9, rax, 1 - pop rax - pinsrq xmm9, rax, 0 - pop rax - pinsrq xmm10, rax, 1 - pop rax - pinsrq xmm10, rax, 0 - pop rax - pinsrq xmm11, rax, 1 - pop rax - pinsrq xmm11, rax, 0 - pop rax - pinsrq xmm12, rax, 1 - pop rax - pinsrq xmm12, rax, 0 - pop rax - pinsrq xmm13, rax, 1 - pop rax - pinsrq xmm13, rax, 0 - pop rax - pinsrq xmm14, rax, 1 - pop rax - pinsrq xmm14, rax, 0 - pop rax - pinsrq xmm15, rax, 1 - pop rax - pinsrq xmm15, rax, 0 - pop rbx - pop rbp - pop rdi - pop rsi - pop r12 - pop r13 - pop r14 - pop r15 -L17: - ret -compute_iv_stdcall endp -ALIGN 16 -gcm128_encrypt_opt proc - push r15 - push r14 - push r13 - push r12 - push rsi - push rdi - push rbp - push rbx - pextrq rax, xmm15, 0 - push rax - pextrq rax, xmm15, 1 - push rax - pextrq rax, xmm14, 0 - push rax - pextrq rax, xmm14, 1 - push rax - pextrq rax, xmm13, 0 - push rax - pextrq rax, xmm13, 1 - push rax - pextrq rax, xmm12, 0 - push rax - pextrq rax, xmm12, 1 - push rax - pextrq rax, xmm11, 0 - push rax - pextrq rax, xmm11, 1 - push rax - pextrq rax, xmm10, 0 - push rax - pextrq rax, xmm10, 1 - push rax - pextrq rax, xmm9, 0 - push rax - pextrq rax, xmm9, 1 - push rax - pextrq rax, xmm8, 0 - push rax - pextrq rax, xmm8, 1 - push rax - pextrq rax, xmm7, 0 - push rax - pextrq rax, xmm7, 1 - push rax - pextrq rax, xmm6, 0 - push rax - pextrq rax, xmm6, 1 - push rax - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - mov rcx, r9 - mov r8, qword ptr [rsp + 264] - mov r9, qword ptr [rsp + 272] - mov rbp, qword ptr [rsp + 352] - mov r13, rcx - lea r9, qword ptr [r9 + 32] - mov rbx, qword ptr [rsp + 280] - mov rcx, rdx - imul rcx, 16 - mov r10, 579005069656919567 - pinsrq xmm9, r10, 0 - mov r10, 283686952306183 - pinsrq xmm9, r10, 1 - pxor xmm8, xmm8 - mov r11, rdi - jmp L55 -ALIGN 16 -L54: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, 
xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L55: - cmp rdx, 6 - jae L54 - cmp rdx, 0 - jbe L56 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L58 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L59 -L58: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L60 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L62 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L64 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L65 -L64: -L65: - jmp L63 -L62: -L63: - jmp L61 -L60: -L61: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L59: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor 
xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L57 -L56: -L57: - mov r15, rsi - cmp rsi, rcx - jbe L66 - movdqu xmm0, xmmword ptr [rbx + 0] - mov r10, rsi - and r10, 15 - cmp r10, 8 - jae L68 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L69 -L68: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L69: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L67 -L66: -L67: - mov rdi, qword ptr [rsp + 288] - mov rsi, qword ptr [rsp + 296] - mov rdx, qword ptr [rsp + 304] - mov rcx, r13 - movdqu xmm0, xmm9 - movdqu xmm1, xmmword ptr [r8 + 0] - movdqu xmmword ptr [rbp + 0], xmm1 - pxor xmm10, xmm10 - mov r11, 1 - pinsrq xmm10, r11, 0 - vpaddd xmm1, xmm1, xmm10 - cmp rdx, 0 - jne L70 - vpshufb xmm1, xmm1, xmm0 - movdqu xmmword ptr [rbp + 32], xmm1 - jmp L71 -L70: - movdqu xmmword ptr [rbp + 32], xmm8 - add rcx, 128 - pextrq rbx, xmm1, 0 - and rbx, 255 - vpshufb xmm1, xmm1, xmm0 - lea r14, qword ptr [rsi + 96] - movdqu xmm4, xmmword ptr [rcx + -128] - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - movdqu xmm15, xmmword ptr [rcx + -112] - mov r12, rcx - sub r12, 96 - vpxor xmm9, xmm1, xmm4 - add rbx, 6 - cmp rbx, 256 - jae L72 - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm12, xmm11, xmm2 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm13, xmm12, xmm2 - vpxor xmm12, xmm12, xmm4 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm13, xmm13, xmm4 - vpaddd xmm1, xmm14, xmm2 - vpxor xmm14, xmm14, xmm4 - jmp L73 -L72: - sub rbx, 256 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 0 - vpaddd xmm11, xmm6, xmm5 - vpaddd xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpxor xmm12, xmm12, xmm4 - vpshufb xmm14, xmm14, xmm0 - vpxor xmm13, xmm13, xmm4 - vpshufb xmm1, xmm1, xmm0 - vpxor xmm14, xmm14, xmm4 -L73: - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -96] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword 
ptr [rcx + -64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -16] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 0] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 16] - movdqu xmm3, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - vpxor xmm4, xmm3, xmmword ptr [rdi + 0] - vaesenc xmm10, xmm10, xmm15 - vpxor xmm5, xmm3, xmmword ptr [rdi + 16] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm6, xmm3, xmmword ptr [rdi + 32] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm8, xmm3, xmmword ptr [rdi + 48] - vaesenc xmm13, xmm13, xmm15 - vpxor xmm2, xmm3, xmmword ptr [rdi + 64] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm3, xmm3, xmmword ptr [rdi + 80] - lea rdi, qword ptr [rdi + 96] - vaesenclast xmm9, xmm9, xmm4 - vaesenclast xmm10, xmm10, xmm5 - vaesenclast xmm11, xmm11, xmm6 - vaesenclast xmm12, xmm12, xmm8 - vaesenclast xmm13, xmm13, xmm2 - vaesenclast xmm14, xmm14, xmm3 - movdqu xmmword ptr [rsi + 0], xmm9 - movdqu xmmword ptr [rsi + 16], xmm10 - movdqu xmmword ptr [rsi + 32], xmm11 - movdqu xmmword ptr [rsi + 48], xmm12 - movdqu xmmword ptr [rsi + 64], xmm13 - movdqu xmmword ptr [rsi + 80], xmm14 - lea rsi, qword ptr [rsi + 96] - vpshufb xmm8, xmm9, xmm0 - vpshufb xmm2, xmm10, xmm0 - movdqu xmmword ptr [rbp + 112], xmm8 - vpshufb xmm4, xmm11, xmm0 - movdqu xmmword ptr [rbp + 96], xmm2 - vpshufb xmm5, xmm12, xmm0 - movdqu xmmword ptr [rbp + 80], xmm4 - vpshufb xmm6, xmm13, xmm0 - movdqu xmmword ptr [rbp + 64], xmm5 - vpshufb xmm7, xmm14, xmm0 - movdqu xmmword ptr [rbp + 48], xmm6 - movdqu xmm4, xmmword ptr [rcx + -128] - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - movdqu xmm15, xmmword ptr [rcx + -112] - mov r12, rcx - sub r12, 96 - vpxor xmm9, xmm1, xmm4 - add rbx, 6 - cmp rbx, 256 - jae L74 - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm12, xmm11, xmm2 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm13, xmm12, xmm2 - vpxor xmm12, xmm12, xmm4 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm13, xmm13, xmm4 - vpaddd xmm1, xmm14, xmm2 - vpxor xmm14, xmm14, xmm4 - jmp L75 -L74: - sub rbx, 256 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 0 - vpaddd xmm11, xmm6, xmm5 - vpaddd xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpxor xmm12, xmm12, xmm4 - vpshufb xmm14, xmm14, xmm0 - vpxor xmm13, xmm13, xmm4 - 
vpshufb xmm1, xmm1, xmm0 - vpxor xmm14, xmm14, xmm4 -L75: - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -96] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -16] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 0] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 16] - movdqu xmm3, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - vpxor xmm4, xmm3, xmmword ptr [rdi + 0] - vaesenc xmm10, xmm10, xmm15 - vpxor xmm5, xmm3, xmmword ptr [rdi + 16] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm6, xmm3, xmmword ptr [rdi + 32] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm8, xmm3, xmmword ptr [rdi + 48] - vaesenc xmm13, xmm13, xmm15 - vpxor xmm2, xmm3, xmmword ptr [rdi + 64] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm3, xmm3, xmmword ptr [rdi + 80] - lea rdi, qword ptr [rdi + 96] - vaesenclast xmm9, xmm9, xmm4 - vaesenclast xmm10, xmm10, xmm5 - vaesenclast xmm11, xmm11, xmm6 - vaesenclast xmm12, xmm12, xmm8 - vaesenclast xmm13, xmm13, xmm2 - vaesenclast xmm14, xmm14, xmm3 - movdqu xmmword ptr [rsi + 0], xmm9 - movdqu xmmword ptr [rsi + 16], xmm10 - movdqu xmmword ptr [rsi + 32], xmm11 - movdqu xmmword ptr [rsi + 48], xmm12 - movdqu xmmword ptr [rsi + 64], xmm13 - movdqu xmmword ptr [rsi + 80], xmm14 - lea rsi, qword ptr [rsi + 96] - sub rdx, 12 - movdqu xmm8, xmmword ptr [rbp + 32] - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vpxor xmm4, xmm4, xmm4 - movdqu xmm15, xmmword ptr [rcx + -128] - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpaddd xmm12, xmm11, xmm2 - vpaddd xmm13, xmm12, xmm2 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rbp + 16], xmm4 - jmp L77 -ALIGN 16 -L76: - add rbx, 6 - cmp rbx, 256 - jb L78 - mov r11, 579005069656919567 - pinsrq xmm0, r11, 0 - mov r11, 283686952306183 - pinsrq xmm0, r11, 1 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 0 - vpaddd xmm11, xmm6, xmm5 - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd 
xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm15 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm15 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpshufb xmm14, xmm14, xmm0 - vpshufb xmm1, xmm1, xmm0 - sub rbx, 256 - jmp L79 -L78: - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd xmm1, xmm2, xmm14 - vpxor xmm10, xmm10, xmm15 - vpxor xmm11, xmm11, xmm15 -L79: - movdqu xmmword ptr [rbp + 128], xmm1 - vpclmulqdq xmm5, xmm7, xmm3, 16 - vpxor xmm12, xmm12, xmm15 - movdqu xmm2, xmmword ptr [rcx + -112] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vaesenc xmm9, xmm9, xmm2 - movdqu xmm0, xmmword ptr [rbp + 48] - vpxor xmm13, xmm13, xmm15 - vpclmulqdq xmm1, xmm7, xmm3, 0 - vaesenc xmm10, xmm10, xmm2 - vpxor xmm14, xmm14, xmm15 - vpclmulqdq xmm7, xmm7, xmm3, 17 - vaesenc xmm11, xmm11, xmm2 - movdqu xmm3, xmmword ptr [r9 + -16] - vaesenc xmm12, xmm12, xmm2 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vaesenc xmm13, xmm13, xmm2 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vaesenc xmm14, xmm14, xmm2 - movdqu xmm15, xmmword ptr [rcx + -96] - vpclmulqdq xmm2, xmm0, xmm3, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 88] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 80] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 32], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 40], r12 - movdqu xmm5, xmmword ptr [r9 + 16] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vaesenc xmm11, xmm11, xmm15 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -64] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 72] - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 64] - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 48], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 56], r12 - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 56] - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 48] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 64], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 72], r12 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vaesenc xmm14, xmm14, 
xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 40] - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 32] - vpclmulqdq xmm8, xmm8, xmm3, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 80], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 88], r12 - vpxor xmm6, xmm6, xmm5 - vaesenc xmm14, xmm14, xmm15 - vpxor xmm6, xmm6, xmm1 - movdqu xmm15, xmmword ptr [rcx + -16] - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm2 - pxor xmm3, xmm3 - mov r11, 13979173243358019584 - pinsrq xmm3, r11, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm7, xmm7, xmm8 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm4, xmm4, xmm5 - movbe r13, qword ptr [r14 + 24] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 16] - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - mov qword ptr [rbp + 96], r13 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 104], r12 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm1, xmmword ptr [rcx + 0] - vaesenc xmm9, xmm9, xmm1 - movdqu xmm15, xmmword ptr [rcx + 16] - vaesenc xmm10, xmm10, xmm1 - vpsrldq xmm6, xmm6, 8 - vaesenc xmm11, xmm11, xmm1 - vpxor xmm7, xmm7, xmm6 - vaesenc xmm12, xmm12, xmm1 - vpxor xmm4, xmm4, xmm0 - movbe r13, qword ptr [r14 + 8] - vaesenc xmm13, xmm13, xmm1 - movbe r12, qword ptr [r14 + 0] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - movdqu xmmword ptr [rbp + 16], xmm7 - vpalignr xmm8, xmm4, xmm4, 8 - vaesenc xmm10, xmm10, xmm15 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm2, xmm1, xmmword ptr [rdi + 0] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm0, xmm1, xmmword ptr [rdi + 16] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm5, xmm1, xmmword ptr [rdi + 32] - vaesenc xmm13, xmm13, xmm15 - vpxor xmm6, xmm1, xmmword ptr [rdi + 48] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm7, xmm1, xmmword ptr [rdi + 64] - vpxor xmm3, xmm1, xmmword ptr [rdi + 80] - movdqu xmm1, xmmword ptr [rbp + 128] - vaesenclast xmm9, xmm9, xmm2 - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vaesenclast xmm10, xmm10, xmm0 - vpaddd xmm0, xmm1, xmm2 - mov qword ptr [rbp + 112], r13 - lea rdi, qword ptr [rdi + 96] - vaesenclast xmm11, xmm11, xmm5 - vpaddd xmm5, xmm0, xmm2 - mov qword ptr [rbp + 120], r12 - lea rsi, qword ptr [rsi + 96] - movdqu xmm15, xmmword ptr [rcx + -128] - vaesenclast xmm12, xmm12, xmm6 - vpaddd xmm6, xmm5, xmm2 - vaesenclast xmm13, xmm13, xmm7 - vpaddd xmm7, xmm6, xmm2 - vaesenclast xmm14, xmm14, xmm3 - vpaddd xmm3, xmm7, xmm2 - sub rdx, 6 - add r14, 96 - cmp rdx, 0 - jbe L80 - movdqu xmmword ptr [rsi + -96], xmm9 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rsi + -80], xmm10 - movdqu xmm10, xmm0 - movdqu xmmword ptr [rsi + -64], xmm11 - movdqu xmm11, xmm5 - movdqu xmmword ptr [rsi + -48], xmm12 - movdqu xmm12, xmm6 - movdqu xmmword ptr [rsi + -32], xmm13 - movdqu xmm13, xmm7 - movdqu xmmword ptr [rsi + -16], xmm14 - movdqu xmm14, xmm3 - movdqu xmm7, xmmword ptr [rbp + 32] - jmp L81 -L80: - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpxor xmm8, xmm8, xmm4 -L81: -ALIGN 16 -L77: - cmp rdx, 0 - ja L76 - movdqu xmm7, xmmword ptr [rbp + 32] - movdqu xmmword ptr [rbp + 32], xmm1 - pxor xmm4, xmm4 - movdqu xmmword ptr [rbp + 16], xmm4 - movdqu xmm3, xmmword ptr [r9 + -32] - vpclmulqdq xmm1, xmm7, 
xmm3, 0 - vpclmulqdq xmm5, xmm7, xmm3, 16 - movdqu xmm0, xmmword ptr [rbp + 48] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vpclmulqdq xmm7, xmm7, xmm3, 17 - movdqu xmm3, xmmword ptr [r9 + -16] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vpclmulqdq xmm2, xmm0, xmm3, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - movdqu xmm5, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vpclmulqdq xmm8, xmm8, xmm3, 17 - vpxor xmm6, xmm6, xmm5 - vpxor xmm6, xmm6, xmm1 - vpxor xmm4, xmm4, xmm2 - pxor xmm3, xmm3 - mov rax, 3254779904 - pinsrd xmm3, eax, 3 - vpxor xmm7, xmm7, xmm8 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - mov r12, 579005069656919567 - pinsrq xmm0, r12, 0 - mov r12, 283686952306183 - pinsrq xmm0, r12, 1 - movdqu xmmword ptr [rsi + -96], xmm9 - vpshufb xmm9, xmm9, xmm0 - vpxor xmm1, xmm1, xmm7 - movdqu xmmword ptr [rsi + -80], xmm10 - vpshufb xmm10, xmm10, xmm0 - movdqu xmmword ptr [rsi + -64], xmm11 - vpshufb xmm11, xmm11, xmm0 - movdqu xmmword ptr [rsi + -48], xmm12 - vpshufb xmm12, xmm12, xmm0 - movdqu xmmword ptr [rsi + -32], xmm13 - vpshufb xmm13, xmm13, xmm0 - movdqu xmmword ptr [rsi + -16], xmm14 - vpshufb xmm14, xmm14, xmm0 - pxor xmm4, xmm4 - movdqu xmm7, xmm14 - movdqu xmmword ptr [rbp + 16], xmm4 - movdqu xmmword ptr [rbp + 48], xmm13 - movdqu xmmword ptr [rbp + 64], xmm12 - movdqu xmmword ptr [rbp + 80], xmm11 - movdqu xmmword ptr [rbp + 96], xmm10 - movdqu xmmword ptr [rbp + 112], xmm9 - movdqu xmm3, xmmword ptr [r9 + -32] - vpclmulqdq xmm1, xmm7, xmm3, 0 - vpclmulqdq xmm5, xmm7, xmm3, 16 - movdqu xmm0, xmmword ptr [rbp + 48] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vpclmulqdq xmm7, xmm7, xmm3, 17 - movdqu xmm3, xmmword ptr [r9 + -16] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vpclmulqdq xmm2, xmm0, xmm3, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - movdqu xmm5, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, 
xmm5, 0 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vpclmulqdq xmm8, xmm8, xmm3, 17 - vpxor xmm6, xmm6, xmm5 - vpxor xmm6, xmm6, xmm1 - vpxor xmm4, xmm4, xmm2 - pxor xmm3, xmm3 - mov rax, 3254779904 - pinsrd xmm3, eax, 3 - vpxor xmm7, xmm7, xmm8 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - sub rcx, 128 -L71: - movdqu xmm11, xmmword ptr [rbp + 32] - mov r8, rcx - mov rax, qword ptr [rsp + 312] - mov rdi, qword ptr [rsp + 320] - mov rdx, qword ptr [rsp + 328] - mov r14, rdx - mov r12, 579005069656919567 - pinsrq xmm9, r12, 0 - mov r12, 283686952306183 - pinsrq xmm9, r12, 1 - pshufb xmm11, xmm9 - pxor xmm10, xmm10 - mov rbx, 1 - pinsrd xmm10, ebx, 0 - mov r11, rax - mov r10, rdi - mov rbx, 0 - jmp L83 -ALIGN 16 -L82: - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm2, xmmword ptr [r11 + 0] - pxor xmm2, xmm0 - movdqu xmmword ptr [r10 + 0], xmm2 - add rbx, 1 - add r11, 16 - add r10, 16 - paddd xmm11, xmm10 -ALIGN 16 -L83: - cmp rbx, rdx - jne L82 - mov r11, rdi - jmp L85 -ALIGN 16 -L84: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb 
xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L85: - cmp rdx, 6 - jae L84 - cmp rdx, 0 - jbe L86 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L88 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L89 -L88: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L90 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L92 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L94 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - 
vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L95 -L94: -L95: - jmp L93 -L92: -L93: - jmp L91 -L90: -L91: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L89: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L87 -L86: -L87: - add r14, qword ptr [rsp + 304] - imul r14, 16 - mov r13, qword ptr [rsp + 344] - cmp r13, r14 - jbe L96 - mov rax, qword ptr [rsp + 336] - mov r10, r13 - and r10, 15 - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm4, xmmword ptr [rax + 0] - pxor xmm0, xmm4 - movdqu xmmword ptr [rax + 0], xmm0 - cmp r10, 8 - jae L98 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L99 -L98: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L99: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L97 -L96: -L97: - mov r11, r15 - pxor xmm0, xmm0 - mov rax, r11 - imul rax, 8 - pinsrq xmm0, rax, 1 - mov rax, r13 - imul rax, 8 - pinsrq xmm0, rax, 0 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq 
xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmm0, xmmword ptr [rbp + 0] - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pshufb xmm8, xmm9 - pxor xmm8, xmm0 - mov r15, qword ptr [rsp + 360] - movdqu xmmword ptr [r15 + 0], xmm8 - pop rax - pinsrq xmm6, rax, 1 - pop rax - pinsrq xmm6, rax, 0 - pop rax - pinsrq xmm7, rax, 1 - pop rax - pinsrq xmm7, rax, 0 - pop rax - pinsrq xmm8, rax, 1 - pop rax - pinsrq xmm8, rax, 0 - pop rax - pinsrq xmm9, rax, 1 - pop rax - pinsrq xmm9, rax, 0 - pop rax - pinsrq xmm10, rax, 1 - pop rax - pinsrq xmm10, rax, 0 - pop rax - pinsrq xmm11, rax, 1 - pop rax - pinsrq xmm11, rax, 0 - pop rax - pinsrq xmm12, rax, 1 - pop rax - pinsrq xmm12, rax, 0 - pop rax - pinsrq xmm13, rax, 1 - pop rax - pinsrq xmm13, rax, 0 - pop rax - pinsrq xmm14, rax, 1 - pop rax - pinsrq xmm14, rax, 0 - pop rax - pinsrq xmm15, rax, 1 - pop rax - pinsrq xmm15, rax, 0 - pop rbx - pop rbp - pop rdi - pop rsi - pop r12 - pop r13 - pop r14 - pop r15 - ret -gcm128_encrypt_opt endp -ALIGN 16 -gcm256_encrypt_opt proc - push r15 - push r14 - push r13 - push r12 - push rsi - push rdi - push rbp - push rbx - pextrq rax, xmm15, 0 - push rax - pextrq rax, xmm15, 1 - push rax - pextrq rax, xmm14, 0 - push rax - pextrq rax, xmm14, 1 - push rax - pextrq rax, xmm13, 0 - push rax - pextrq rax, xmm13, 1 - push rax - pextrq rax, xmm12, 0 - push rax - pextrq rax, xmm12, 1 - push rax - pextrq rax, xmm11, 0 - push rax - pextrq rax, xmm11, 1 - push rax - pextrq rax, xmm10, 0 - push rax - pextrq rax, xmm10, 1 - push rax - pextrq rax, xmm9, 0 - push rax - pextrq rax, xmm9, 1 - push rax - pextrq rax, xmm8, 0 - push rax - pextrq rax, xmm8, 1 - push rax - pextrq rax, xmm7, 0 - push rax - pextrq rax, xmm7, 1 - push rax - pextrq rax, xmm6, 0 - push rax - pextrq rax, xmm6, 1 - push rax - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - mov rcx, r9 - mov r8, qword ptr [rsp + 264] - mov r9, qword ptr [rsp + 272] - mov rbp, qword ptr [rsp + 352] - mov r13, rcx - lea r9, qword ptr [r9 + 32] - mov rbx, qword ptr [rsp + 280] - mov rcx, rdx - imul rcx, 16 - mov r10, 579005069656919567 - pinsrq xmm9, r10, 0 - mov r10, 283686952306183 - pinsrq xmm9, r10, 1 - pxor xmm8, xmm8 - mov r11, rdi - jmp L101 -ALIGN 16 -L100: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, 
xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L101: - cmp rdx, 6 - jae L100 - cmp rdx, 0 - jbe L102 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L104 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L105 -L104: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L106 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L108 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, 
xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L110 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L111 -L110: -L111: - jmp L109 -L108: -L109: - jmp L107 -L106: -L107: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L105: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L103 -L102: -L103: - mov r15, rsi - cmp rsi, rcx - jbe L112 - movdqu xmm0, xmmword ptr [rbx + 0] - mov r10, rsi - and r10, 15 - cmp r10, 8 - jae L114 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L115 -L114: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L115: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L113 -L112: -L113: - mov rdi, qword ptr [rsp + 288] - mov rsi, qword ptr [rsp + 296] - mov rdx, qword ptr [rsp + 304] - mov rcx, r13 - movdqu xmm0, xmm9 - movdqu xmm1, xmmword ptr [r8 + 0] - movdqu xmmword ptr [rbp + 0], xmm1 - pxor xmm10, xmm10 - mov r11, 1 - pinsrq xmm10, r11, 0 - vpaddd xmm1, xmm1, xmm10 - cmp rdx, 0 - jne L116 - vpshufb xmm1, xmm1, xmm0 - movdqu xmmword ptr [rbp + 32], xmm1 - jmp L117 -L116: - movdqu xmmword ptr [rbp + 32], xmm8 - add rcx, 128 - pextrq rbx, xmm1, 0 - and rbx, 255 - vpshufb xmm1, xmm1, xmm0 - lea r14, qword ptr [rsi + 96] - movdqu xmm4, xmmword ptr [rcx + -128] - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - movdqu xmm15, xmmword ptr [rcx + -112] - mov r12, rcx - sub r12, 96 - vpxor xmm9, xmm1, xmm4 - add rbx, 6 - cmp rbx, 256 - jae L118 - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm12, xmm11, xmm2 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm13, xmm12, xmm2 - vpxor xmm12, xmm12, xmm4 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm13, xmm13, xmm4 - vpaddd xmm1, xmm14, xmm2 - vpxor xmm14, xmm14, xmm4 - jmp L119 -L118: - sub rbx, 256 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 
0 - vpaddd xmm11, xmm6, xmm5 - vpaddd xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpxor xmm12, xmm12, xmm4 - vpshufb xmm14, xmm14, xmm0 - vpxor xmm13, xmm13, xmm4 - vpshufb xmm1, xmm1, xmm0 - vpxor xmm14, xmm14, xmm4 -L119: - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -96] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -16] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 0] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 16] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 48] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 80] - movdqu xmm3, xmmword ptr [rcx + 96] - vaesenc xmm9, xmm9, xmm15 - vpxor xmm4, xmm3, xmmword ptr [rdi + 0] - vaesenc xmm10, xmm10, xmm15 - vpxor xmm5, xmm3, xmmword ptr [rdi + 16] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm6, xmm3, xmmword ptr [rdi + 32] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm8, xmm3, xmmword ptr [rdi + 48] - vaesenc xmm13, xmm13, xmm15 - vpxor xmm2, xmm3, xmmword ptr [rdi + 64] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm3, xmm3, xmmword ptr [rdi + 80] - lea rdi, qword 
ptr [rdi + 96] - vaesenclast xmm9, xmm9, xmm4 - vaesenclast xmm10, xmm10, xmm5 - vaesenclast xmm11, xmm11, xmm6 - vaesenclast xmm12, xmm12, xmm8 - vaesenclast xmm13, xmm13, xmm2 - vaesenclast xmm14, xmm14, xmm3 - movdqu xmmword ptr [rsi + 0], xmm9 - movdqu xmmword ptr [rsi + 16], xmm10 - movdqu xmmword ptr [rsi + 32], xmm11 - movdqu xmmword ptr [rsi + 48], xmm12 - movdqu xmmword ptr [rsi + 64], xmm13 - movdqu xmmword ptr [rsi + 80], xmm14 - lea rsi, qword ptr [rsi + 96] - vpshufb xmm8, xmm9, xmm0 - vpshufb xmm2, xmm10, xmm0 - movdqu xmmword ptr [rbp + 112], xmm8 - vpshufb xmm4, xmm11, xmm0 - movdqu xmmword ptr [rbp + 96], xmm2 - vpshufb xmm5, xmm12, xmm0 - movdqu xmmword ptr [rbp + 80], xmm4 - vpshufb xmm6, xmm13, xmm0 - movdqu xmmword ptr [rbp + 64], xmm5 - vpshufb xmm7, xmm14, xmm0 - movdqu xmmword ptr [rbp + 48], xmm6 - movdqu xmm4, xmmword ptr [rcx + -128] - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - movdqu xmm15, xmmword ptr [rcx + -112] - mov r12, rcx - sub r12, 96 - vpxor xmm9, xmm1, xmm4 - add rbx, 6 - cmp rbx, 256 - jae L120 - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm12, xmm11, xmm2 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm13, xmm12, xmm2 - vpxor xmm12, xmm12, xmm4 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm13, xmm13, xmm4 - vpaddd xmm1, xmm14, xmm2 - vpxor xmm14, xmm14, xmm4 - jmp L121 -L120: - sub rbx, 256 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 0 - vpaddd xmm11, xmm6, xmm5 - vpaddd xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm4 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm4 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpxor xmm12, xmm12, xmm4 - vpshufb xmm14, xmm14, xmm0 - vpxor xmm13, xmm13, xmm4 - vpshufb xmm1, xmm1, xmm0 - vpxor xmm14, xmm14, xmm4 -L121: - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -96] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -16] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 0] - 
vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 16] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 48] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + 80] - movdqu xmm3, xmmword ptr [rcx + 96] - vaesenc xmm9, xmm9, xmm15 - vpxor xmm4, xmm3, xmmword ptr [rdi + 0] - vaesenc xmm10, xmm10, xmm15 - vpxor xmm5, xmm3, xmmword ptr [rdi + 16] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm6, xmm3, xmmword ptr [rdi + 32] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm8, xmm3, xmmword ptr [rdi + 48] - vaesenc xmm13, xmm13, xmm15 - vpxor xmm2, xmm3, xmmword ptr [rdi + 64] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm3, xmm3, xmmword ptr [rdi + 80] - lea rdi, qword ptr [rdi + 96] - vaesenclast xmm9, xmm9, xmm4 - vaesenclast xmm10, xmm10, xmm5 - vaesenclast xmm11, xmm11, xmm6 - vaesenclast xmm12, xmm12, xmm8 - vaesenclast xmm13, xmm13, xmm2 - vaesenclast xmm14, xmm14, xmm3 - movdqu xmmword ptr [rsi + 0], xmm9 - movdqu xmmword ptr [rsi + 16], xmm10 - movdqu xmmword ptr [rsi + 32], xmm11 - movdqu xmmword ptr [rsi + 48], xmm12 - movdqu xmmword ptr [rsi + 64], xmm13 - movdqu xmmword ptr [rsi + 80], xmm14 - lea rsi, qword ptr [rsi + 96] - sub rdx, 12 - movdqu xmm8, xmmword ptr [rbp + 32] - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vpxor xmm4, xmm4, xmm4 - movdqu xmm15, xmmword ptr [rcx + -128] - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpaddd xmm12, xmm11, xmm2 - vpaddd xmm13, xmm12, xmm2 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rbp + 16], xmm4 - jmp L123 -ALIGN 16 -L122: - add rbx, 6 - cmp rbx, 256 - jb L124 - mov r11, 579005069656919567 - pinsrq xmm0, r11, 0 - mov r11, 283686952306183 - pinsrq xmm0, r11, 1 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 0 - vpaddd xmm11, xmm6, xmm5 - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm15 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm15 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpshufb xmm14, xmm14, xmm0 - vpshufb xmm1, xmm1, xmm0 - sub rbx, 256 - jmp L125 -L124: - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd xmm1, xmm2, xmm14 - vpxor xmm10, xmm10, xmm15 - vpxor xmm11, xmm11, xmm15 -L125: - movdqu xmmword ptr [rbp + 128], xmm1 - vpclmulqdq xmm5, xmm7, xmm3, 16 - vpxor xmm12, xmm12, xmm15 - movdqu xmm2, xmmword ptr [rcx + -112] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vaesenc xmm9, xmm9, xmm2 - movdqu xmm0, xmmword 
ptr [rbp + 48] - vpxor xmm13, xmm13, xmm15 - vpclmulqdq xmm1, xmm7, xmm3, 0 - vaesenc xmm10, xmm10, xmm2 - vpxor xmm14, xmm14, xmm15 - vpclmulqdq xmm7, xmm7, xmm3, 17 - vaesenc xmm11, xmm11, xmm2 - movdqu xmm3, xmmword ptr [r9 + -16] - vaesenc xmm12, xmm12, xmm2 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vaesenc xmm13, xmm13, xmm2 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vaesenc xmm14, xmm14, xmm2 - movdqu xmm15, xmmword ptr [rcx + -96] - vpclmulqdq xmm2, xmm0, xmm3, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 88] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 80] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 32], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 40], r12 - movdqu xmm5, xmmword ptr [r9 + 16] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vaesenc xmm11, xmm11, xmm15 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -64] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 72] - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 64] - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 48], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 56], r12 - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 56] - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 48] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 64], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 72], r12 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 40] - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 32] - vpclmulqdq xmm8, xmm8, xmm3, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 80], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 88], r12 - vpxor xmm6, xmm6, xmm5 - vaesenc xmm14, xmm14, xmm15 - vpxor xmm6, xmm6, xmm1 - movdqu xmm15, xmmword ptr [rcx + -16] - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm2 - pxor 
xmm3, xmm3 - mov r11, 13979173243358019584 - pinsrq xmm3, r11, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm7, xmm7, xmm8 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm4, xmm4, xmm5 - movbe r13, qword ptr [r14 + 24] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 16] - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - mov qword ptr [rbp + 96], r13 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 104], r12 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm1, xmmword ptr [rcx + 0] - vaesenc xmm9, xmm9, xmm1 - movdqu xmm15, xmmword ptr [rcx + 16] - vaesenc xmm10, xmm10, xmm1 - vpsrldq xmm6, xmm6, 8 - vaesenc xmm11, xmm11, xmm1 - vpxor xmm7, xmm7, xmm6 - vaesenc xmm12, xmm12, xmm1 - vpxor xmm4, xmm4, xmm0 - movbe r13, qword ptr [r14 + 8] - vaesenc xmm13, xmm13, xmm1 - movbe r12, qword ptr [r14 + 0] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - vaesenc xmm9, xmm9, xmm1 - vaesenc xmm10, xmm10, xmm1 - vaesenc xmm11, xmm11, xmm1 - vaesenc xmm12, xmm12, xmm1 - vaesenc xmm13, xmm13, xmm1 - movdqu xmm15, xmmword ptr [rcx + 48] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - vaesenc xmm9, xmm9, xmm1 - vaesenc xmm10, xmm10, xmm1 - vaesenc xmm11, xmm11, xmm1 - vaesenc xmm12, xmm12, xmm1 - vaesenc xmm13, xmm13, xmm1 - movdqu xmm15, xmmword ptr [rcx + 80] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 96] - vaesenc xmm9, xmm9, xmm15 - movdqu xmmword ptr [rbp + 16], xmm7 - vpalignr xmm8, xmm4, xmm4, 8 - vaesenc xmm10, xmm10, xmm15 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm2, xmm1, xmmword ptr [rdi + 0] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm0, xmm1, xmmword ptr [rdi + 16] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm5, xmm1, xmmword ptr [rdi + 32] - vaesenc xmm13, xmm13, xmm15 - vpxor xmm6, xmm1, xmmword ptr [rdi + 48] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm7, xmm1, xmmword ptr [rdi + 64] - vpxor xmm3, xmm1, xmmword ptr [rdi + 80] - movdqu xmm1, xmmword ptr [rbp + 128] - vaesenclast xmm9, xmm9, xmm2 - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vaesenclast xmm10, xmm10, xmm0 - vpaddd xmm0, xmm1, xmm2 - mov qword ptr [rbp + 112], r13 - lea rdi, qword ptr [rdi + 96] - vaesenclast xmm11, xmm11, xmm5 - vpaddd xmm5, xmm0, xmm2 - mov qword ptr [rbp + 120], r12 - lea rsi, qword ptr [rsi + 96] - movdqu xmm15, xmmword ptr [rcx + -128] - vaesenclast xmm12, xmm12, xmm6 - vpaddd xmm6, xmm5, xmm2 - vaesenclast xmm13, xmm13, xmm7 - vpaddd xmm7, xmm6, xmm2 - vaesenclast xmm14, xmm14, xmm3 - vpaddd xmm3, xmm7, xmm2 - sub rdx, 6 - add r14, 96 - cmp rdx, 0 - jbe L126 - movdqu xmmword ptr [rsi + -96], xmm9 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rsi + -80], xmm10 - movdqu xmm10, xmm0 - movdqu xmmword ptr [rsi + -64], xmm11 - movdqu xmm11, xmm5 - movdqu xmmword ptr [rsi + -48], xmm12 - movdqu xmm12, xmm6 - movdqu xmmword ptr [rsi + -32], xmm13 - movdqu xmm13, xmm7 - movdqu xmmword ptr [rsi + -16], xmm14 - movdqu xmm14, xmm3 - movdqu xmm7, xmmword ptr [rbp + 32] - jmp L127 -L126: - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpxor xmm8, xmm8, xmm4 -L127: -ALIGN 16 -L123: - cmp rdx, 0 - ja L122 - movdqu xmm7, xmmword ptr [rbp + 32] - movdqu 
xmmword ptr [rbp + 32], xmm1 - pxor xmm4, xmm4 - movdqu xmmword ptr [rbp + 16], xmm4 - movdqu xmm3, xmmword ptr [r9 + -32] - vpclmulqdq xmm1, xmm7, xmm3, 0 - vpclmulqdq xmm5, xmm7, xmm3, 16 - movdqu xmm0, xmmword ptr [rbp + 48] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vpclmulqdq xmm7, xmm7, xmm3, 17 - movdqu xmm3, xmmword ptr [r9 + -16] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vpclmulqdq xmm2, xmm0, xmm3, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - movdqu xmm5, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vpclmulqdq xmm8, xmm8, xmm3, 17 - vpxor xmm6, xmm6, xmm5 - vpxor xmm6, xmm6, xmm1 - vpxor xmm4, xmm4, xmm2 - pxor xmm3, xmm3 - mov rax, 3254779904 - pinsrd xmm3, eax, 3 - vpxor xmm7, xmm7, xmm8 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - mov r12, 579005069656919567 - pinsrq xmm0, r12, 0 - mov r12, 283686952306183 - pinsrq xmm0, r12, 1 - movdqu xmmword ptr [rsi + -96], xmm9 - vpshufb xmm9, xmm9, xmm0 - vpxor xmm1, xmm1, xmm7 - movdqu xmmword ptr [rsi + -80], xmm10 - vpshufb xmm10, xmm10, xmm0 - movdqu xmmword ptr [rsi + -64], xmm11 - vpshufb xmm11, xmm11, xmm0 - movdqu xmmword ptr [rsi + -48], xmm12 - vpshufb xmm12, xmm12, xmm0 - movdqu xmmword ptr [rsi + -32], xmm13 - vpshufb xmm13, xmm13, xmm0 - movdqu xmmword ptr [rsi + -16], xmm14 - vpshufb xmm14, xmm14, xmm0 - pxor xmm4, xmm4 - movdqu xmm7, xmm14 - movdqu xmmword ptr [rbp + 16], xmm4 - movdqu xmmword ptr [rbp + 48], xmm13 - movdqu xmmword ptr [rbp + 64], xmm12 - movdqu xmmword ptr [rbp + 80], xmm11 - movdqu xmmword ptr [rbp + 96], xmm10 - movdqu xmmword ptr [rbp + 112], xmm9 - movdqu xmm3, xmmword ptr [r9 + -32] - vpclmulqdq xmm1, xmm7, xmm3, 0 - vpclmulqdq xmm5, xmm7, xmm3, 16 - movdqu xmm0, xmmword ptr [rbp + 48] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vpclmulqdq xmm7, xmm7, xmm3, 17 - movdqu xmm3, xmmword ptr [r9 + -16] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vpclmulqdq xmm2, xmm0, xmm3, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq 
xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - movdqu xmm5, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vpclmulqdq xmm8, xmm8, xmm3, 17 - vpxor xmm6, xmm6, xmm5 - vpxor xmm6, xmm6, xmm1 - vpxor xmm4, xmm4, xmm2 - pxor xmm3, xmm3 - mov rax, 3254779904 - pinsrd xmm3, eax, 3 - vpxor xmm7, xmm7, xmm8 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - sub rcx, 128 -L117: - movdqu xmm11, xmmword ptr [rbp + 32] - mov r8, rcx - mov rax, qword ptr [rsp + 312] - mov rdi, qword ptr [rsp + 320] - mov rdx, qword ptr [rsp + 328] - mov r14, rdx - mov r12, 579005069656919567 - pinsrq xmm9, r12, 0 - mov r12, 283686952306183 - pinsrq xmm9, r12, 1 - pshufb xmm11, xmm9 - pxor xmm10, xmm10 - mov rbx, 1 - pinsrd xmm10, ebx, 0 - mov r11, rax - mov r10, rdi - mov rbx, 0 - jmp L129 -ALIGN 16 -L128: - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm2, xmmword ptr [r11 + 0] - pxor xmm2, xmm0 - movdqu xmmword ptr [r10 + 0], xmm2 - add rbx, 1 - add r11, 16 - add r10, 16 - paddd xmm11, xmm10 -ALIGN 16 -L129: - cmp rbx, rdx - jne L128 - mov r11, rdi - jmp L131 -ALIGN 16 -L130: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, 
xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L131: - cmp rdx, 6 - jae L130 - cmp rdx, 0 - jbe L132 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L134 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L135 -L134: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L136 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L138 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 
0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L140 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L141 -L140: -L141: - jmp L139 -L138: -L139: - jmp L137 -L136: -L137: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L135: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L133 -L132: -L133: - add r14, qword ptr [rsp + 304] - imul r14, 16 - mov r13, qword ptr [rsp + 344] - cmp r13, r14 - jbe L142 - mov rax, qword ptr [rsp + 336] - mov r10, r13 - and r10, 15 - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm4, xmmword ptr [rax + 0] - pxor xmm0, xmm4 - movdqu xmmword ptr [rax + 0], xmm0 - cmp r10, 8 - jae L144 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L145 -L144: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L145: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, 
xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L143 -L142: -L143: - mov r11, r15 - pxor xmm0, xmm0 - mov rax, r11 - imul rax, 8 - pinsrq xmm0, rax, 1 - mov rax, r13 - imul rax, 8 - pinsrq xmm0, rax, 0 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmm0, xmmword ptr [rbp + 0] - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pshufb xmm8, xmm9 - pxor xmm8, xmm0 - mov r15, qword ptr [rsp + 360] - movdqu xmmword ptr [r15 + 0], xmm8 - pop rax - pinsrq xmm6, rax, 1 - pop rax - pinsrq xmm6, rax, 0 - pop rax - pinsrq xmm7, rax, 1 - pop rax - pinsrq xmm7, rax, 0 - pop rax - pinsrq xmm8, rax, 1 - pop rax - pinsrq xmm8, rax, 0 - pop rax - pinsrq xmm9, rax, 1 - pop rax - pinsrq xmm9, rax, 0 - pop rax - pinsrq xmm10, rax, 1 - pop rax - pinsrq xmm10, rax, 0 - pop rax - pinsrq xmm11, rax, 1 - pop rax - pinsrq xmm11, rax, 0 - pop rax - pinsrq xmm12, rax, 1 - pop rax - pinsrq xmm12, rax, 0 - pop rax - pinsrq xmm13, rax, 1 - pop rax - pinsrq xmm13, rax, 0 - pop rax - pinsrq xmm14, rax, 1 - pop rax - pinsrq xmm14, rax, 0 - pop rax - pinsrq xmm15, rax, 1 - pop rax - pinsrq xmm15, rax, 0 - pop rbx - pop rbp - pop rdi - pop rsi - pop r12 - pop r13 - pop r14 - pop r15 - ret -gcm256_encrypt_opt endp -ALIGN 16 -gcm128_decrypt_opt proc - push r15 - push r14 - push r13 - push r12 - push rsi - push rdi - push rbp - push rbx - pextrq rax, xmm15, 0 - push rax - pextrq rax, xmm15, 1 - push rax - pextrq rax, xmm14, 0 - push rax - pextrq rax, xmm14, 1 - push rax - pextrq rax, xmm13, 0 - push rax - pextrq rax, xmm13, 1 - push rax - pextrq rax, xmm12, 0 - push rax - pextrq rax, xmm12, 1 - push rax - pextrq rax, xmm11, 0 - push rax - pextrq rax, xmm11, 1 - push rax - pextrq rax, xmm10, 0 - push rax - pextrq rax, xmm10, 1 - push rax - pextrq rax, xmm9, 0 - push rax - pextrq rax, xmm9, 1 - push rax - pextrq rax, xmm8, 0 - push rax - pextrq rax, xmm8, 1 - push rax - pextrq rax, xmm7, 0 - push rax - pextrq rax, xmm7, 1 - push rax - pextrq rax, xmm6, 0 - push rax - pextrq rax, xmm6, 1 - push rax - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - mov rcx, r9 - mov r8, qword ptr [rsp + 264] - mov 
r9, qword ptr [rsp + 272] - mov rbp, qword ptr [rsp + 352] - mov r13, rcx - lea r9, qword ptr [r9 + 32] - mov rbx, qword ptr [rsp + 280] - mov rcx, rdx - imul rcx, 16 - mov r10, 579005069656919567 - pinsrq xmm9, r10, 0 - mov r10, 283686952306183 - pinsrq xmm9, r10, 1 - pxor xmm8, xmm8 - mov r11, rdi - jmp L147 -ALIGN 16 -L146: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L147: - cmp rdx, 6 - jae L146 - cmp rdx, 0 - jbe L148 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L150 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L151 -L150: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 
- movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L152 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L154 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L156 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L157 -L156: -L157: - jmp L155 -L154: -L155: - jmp L153 -L152: -L153: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L151: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L149 -L148: -L149: - mov r15, rsi - cmp rsi, rcx - jbe L158 - movdqu xmm0, xmmword ptr [rbx + 0] - mov r10, rsi - and r10, 15 - cmp r10, 8 - jae L160 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L161 -L160: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L161: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L159 -L158: -L159: - mov rdi, qword ptr [rsp + 288] - mov rsi, qword ptr [rsp + 296] - mov rdx, qword ptr [rsp + 304] - mov rcx, r13 - movdqu xmm0, xmm9 - movdqu xmm1, xmmword ptr [r8 + 0] - movdqu xmmword ptr [rbp + 0], xmm1 - pxor xmm10, xmm10 - mov r11, 1 - pinsrq xmm10, r11, 0 - vpaddd xmm1, xmm1, xmm10 - cmp rdx, 0 - jne L162 - vpshufb xmm1, xmm1, xmm0 - movdqu xmmword ptr [rbp + 32], xmm1 - jmp L163 -L162: - movdqu xmmword 
ptr [rbp + 32], xmm8 - add rcx, 128 - pextrq rbx, xmm1, 0 - and rbx, 255 - vpshufb xmm1, xmm1, xmm0 - lea r14, qword ptr [rdi + 96] - movdqu xmm8, xmmword ptr [rbp + 32] - movdqu xmm7, xmmword ptr [rdi + 80] - movdqu xmm4, xmmword ptr [rdi + 64] - movdqu xmm5, xmmword ptr [rdi + 48] - movdqu xmm6, xmmword ptr [rdi + 32] - vpshufb xmm7, xmm7, xmm0 - movdqu xmm2, xmmword ptr [rdi + 16] - vpshufb xmm4, xmm4, xmm0 - movdqu xmm3, xmmword ptr [rdi + 0] - vpshufb xmm5, xmm5, xmm0 - movdqu xmmword ptr [rbp + 48], xmm4 - vpshufb xmm6, xmm6, xmm0 - movdqu xmmword ptr [rbp + 64], xmm5 - vpshufb xmm2, xmm2, xmm0 - movdqu xmmword ptr [rbp + 80], xmm6 - vpshufb xmm3, xmm3, xmm0 - movdqu xmmword ptr [rbp + 96], xmm2 - movdqu xmmword ptr [rbp + 112], xmm3 - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vpxor xmm4, xmm4, xmm4 - movdqu xmm15, xmmword ptr [rcx + -128] - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpaddd xmm12, xmm11, xmm2 - vpaddd xmm13, xmm12, xmm2 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rbp + 16], xmm4 - cmp rdx, 6 - jne L164 - sub r14, 96 - jmp L165 -L164: -L165: - jmp L167 -ALIGN 16 -L166: - add rbx, 6 - cmp rbx, 256 - jb L168 - mov r11, 579005069656919567 - pinsrq xmm0, r11, 0 - mov r11, 283686952306183 - pinsrq xmm0, r11, 1 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 0 - vpaddd xmm11, xmm6, xmm5 - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm15 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm15 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpshufb xmm14, xmm14, xmm0 - vpshufb xmm1, xmm1, xmm0 - sub rbx, 256 - jmp L169 -L168: - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd xmm1, xmm2, xmm14 - vpxor xmm10, xmm10, xmm15 - vpxor xmm11, xmm11, xmm15 -L169: - movdqu xmmword ptr [rbp + 128], xmm1 - vpclmulqdq xmm5, xmm7, xmm3, 16 - vpxor xmm12, xmm12, xmm15 - movdqu xmm2, xmmword ptr [rcx + -112] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vaesenc xmm9, xmm9, xmm2 - movdqu xmm0, xmmword ptr [rbp + 48] - vpxor xmm13, xmm13, xmm15 - vpclmulqdq xmm1, xmm7, xmm3, 0 - vaesenc xmm10, xmm10, xmm2 - vpxor xmm14, xmm14, xmm15 - vpclmulqdq xmm7, xmm7, xmm3, 17 - vaesenc xmm11, xmm11, xmm2 - movdqu xmm3, xmmword ptr [r9 + -16] - vaesenc xmm12, xmm12, xmm2 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vaesenc xmm13, xmm13, xmm2 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vaesenc xmm14, xmm14, xmm2 - movdqu xmm15, xmmword ptr [rcx + -96] - vpclmulqdq xmm2, xmm0, xmm3, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 88] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 80] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 32], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 40], r12 - movdqu xmm5, xmmword ptr [r9 + 16] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vaesenc xmm11, 
xmm11, xmm15 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -64] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 72] - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 64] - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 48], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 56], r12 - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 56] - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 48] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 64], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 72], r12 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 40] - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 32] - vpclmulqdq xmm8, xmm8, xmm3, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 80], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 88], r12 - vpxor xmm6, xmm6, xmm5 - vaesenc xmm14, xmm14, xmm15 - vpxor xmm6, xmm6, xmm1 - movdqu xmm15, xmmword ptr [rcx + -16] - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm2 - pxor xmm3, xmm3 - mov r11, 13979173243358019584 - pinsrq xmm3, r11, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm7, xmm7, xmm8 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm4, xmm4, xmm5 - movbe r13, qword ptr [r14 + 24] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 16] - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - mov qword ptr [rbp + 96], r13 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 104], r12 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm1, xmmword ptr [rcx + 0] - vaesenc xmm9, xmm9, xmm1 - movdqu xmm15, xmmword ptr [rcx + 16] - vaesenc xmm10, xmm10, xmm1 - vpsrldq xmm6, xmm6, 8 - vaesenc xmm11, xmm11, xmm1 - vpxor xmm7, xmm7, xmm6 - vaesenc xmm12, xmm12, xmm1 - vpxor xmm4, xmm4, xmm0 - movbe r13, qword ptr [r14 + 8] - vaesenc xmm13, xmm13, xmm1 - movbe r12, qword ptr [r14 + 0] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - movdqu xmmword ptr [rbp + 16], xmm7 - vpalignr xmm8, xmm4, xmm4, 8 - vaesenc xmm10, xmm10, xmm15 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm2, xmm1, xmmword ptr [rdi + 0] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm0, xmm1, xmmword ptr [rdi + 16] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm5, xmm1, xmmword ptr [rdi + 32] - vaesenc xmm13, 
xmm13, xmm15 - vpxor xmm6, xmm1, xmmword ptr [rdi + 48] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm7, xmm1, xmmword ptr [rdi + 64] - vpxor xmm3, xmm1, xmmword ptr [rdi + 80] - movdqu xmm1, xmmword ptr [rbp + 128] - vaesenclast xmm9, xmm9, xmm2 - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vaesenclast xmm10, xmm10, xmm0 - vpaddd xmm0, xmm1, xmm2 - mov qword ptr [rbp + 112], r13 - lea rdi, qword ptr [rdi + 96] - vaesenclast xmm11, xmm11, xmm5 - vpaddd xmm5, xmm0, xmm2 - mov qword ptr [rbp + 120], r12 - lea rsi, qword ptr [rsi + 96] - movdqu xmm15, xmmword ptr [rcx + -128] - vaesenclast xmm12, xmm12, xmm6 - vpaddd xmm6, xmm5, xmm2 - vaesenclast xmm13, xmm13, xmm7 - vpaddd xmm7, xmm6, xmm2 - vaesenclast xmm14, xmm14, xmm3 - vpaddd xmm3, xmm7, xmm2 - sub rdx, 6 - cmp rdx, 6 - jbe L170 - add r14, 96 - jmp L171 -L170: -L171: - cmp rdx, 0 - jbe L172 - movdqu xmmword ptr [rsi + -96], xmm9 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rsi + -80], xmm10 - movdqu xmm10, xmm0 - movdqu xmmword ptr [rsi + -64], xmm11 - movdqu xmm11, xmm5 - movdqu xmmword ptr [rsi + -48], xmm12 - movdqu xmm12, xmm6 - movdqu xmmword ptr [rsi + -32], xmm13 - movdqu xmm13, xmm7 - movdqu xmmword ptr [rsi + -16], xmm14 - movdqu xmm14, xmm3 - movdqu xmm7, xmmword ptr [rbp + 32] - jmp L173 -L172: - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpxor xmm8, xmm8, xmm4 -L173: -ALIGN 16 -L167: - cmp rdx, 0 - ja L166 - movdqu xmmword ptr [rbp + 32], xmm1 - movdqu xmmword ptr [rsi + -96], xmm9 - movdqu xmmword ptr [rsi + -80], xmm10 - movdqu xmmword ptr [rsi + -64], xmm11 - movdqu xmmword ptr [rsi + -48], xmm12 - movdqu xmmword ptr [rsi + -32], xmm13 - movdqu xmmword ptr [rsi + -16], xmm14 - sub rcx, 128 -L163: - movdqu xmm11, xmmword ptr [rbp + 32] - mov r8, rcx - mov rax, qword ptr [rsp + 312] - mov rdi, qword ptr [rsp + 320] - mov rdx, qword ptr [rsp + 328] - mov r14, rdx - mov r12, 579005069656919567 - pinsrq xmm9, r12, 0 - mov r12, 283686952306183 - pinsrq xmm9, r12, 1 - pshufb xmm11, xmm9 - mov rbx, rdi - mov r12, rdx - mov rdi, rax - mov r11, rdi - jmp L175 -ALIGN 16 -L174: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor 
xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L175: - cmp rdx, 6 - jae L174 - cmp rdx, 0 - jbe L176 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L178 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L179 -L178: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L180 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L182 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L184 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L185 -L184: -L185: - jmp L183 -L182: -L183: - jmp L181 -L180: -L181: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L179: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, 
xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L177 -L176: -L177: - mov rdi, rbx - mov rdx, r12 - pxor xmm10, xmm10 - mov rbx, 1 - pinsrd xmm10, ebx, 0 - mov r11, rax - mov r10, rdi - mov rbx, 0 - jmp L187 -ALIGN 16 -L186: - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm2, xmmword ptr [r11 + 0] - pxor xmm2, xmm0 - movdqu xmmword ptr [r10 + 0], xmm2 - add rbx, 1 - add r11, 16 - add r10, 16 - paddd xmm11, xmm10 -ALIGN 16 -L187: - cmp rbx, rdx - jne L186 - add r14, qword ptr [rsp + 304] - imul r14, 16 - mov r13, qword ptr [rsp + 344] - cmp r13, r14 - jbe L188 - mov rax, qword ptr [rsp + 336] - mov r10, r13 - and r10, 15 - movdqu xmm0, xmmword ptr [rax + 0] - movdqu xmm10, xmm0 - cmp r10, 8 - jae L190 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L191 -L190: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L191: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pxor xmm10, xmm0 - movdqu xmmword ptr [rax + 0], xmm10 - jmp L189 -L188: -L189: - mov r11, r15 - pxor xmm0, xmm0 - mov rax, r11 - imul rax, 8 - pinsrq xmm0, rax, 1 - mov rax, r13 - imul rax, 8 - pinsrq xmm0, rax, 0 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, 
xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmm0, xmmword ptr [rbp + 0] - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pshufb xmm8, xmm9 - pxor xmm8, xmm0 - mov r15, qword ptr [rsp + 360] - movdqu xmm0, xmmword ptr [r15 + 0] - pcmpeqd xmm0, xmm8 - pextrq rdx, xmm0, 0 - sub rdx, 18446744073709551615 - mov rax, 0 - adc rax, 0 - pextrq rdx, xmm0, 1 - sub rdx, 18446744073709551615 - mov rdx, 0 - adc rdx, 0 - add rax, rdx - mov rcx, rax - pop rax - pinsrq xmm6, rax, 1 - pop rax - pinsrq xmm6, rax, 0 - pop rax - pinsrq xmm7, rax, 1 - pop rax - pinsrq xmm7, rax, 0 - pop rax - pinsrq xmm8, rax, 1 - pop rax - pinsrq xmm8, rax, 0 - pop rax - pinsrq xmm9, rax, 1 - pop rax - pinsrq xmm9, rax, 0 - pop rax - pinsrq xmm10, rax, 1 - pop rax - pinsrq xmm10, rax, 0 - pop rax - pinsrq xmm11, rax, 1 - pop rax - pinsrq xmm11, rax, 0 - pop rax - pinsrq xmm12, rax, 1 - pop rax - pinsrq xmm12, rax, 0 - pop rax - pinsrq xmm13, rax, 1 - pop rax - pinsrq xmm13, rax, 0 - pop rax - pinsrq xmm14, rax, 1 - pop rax - pinsrq xmm14, rax, 0 - pop rax - pinsrq xmm15, rax, 1 - pop rax - pinsrq xmm15, rax, 0 - pop rbx - pop rbp - pop rdi - pop rsi - pop r12 - pop r13 - pop r14 - pop r15 - mov rax, rcx - ret -gcm128_decrypt_opt endp -ALIGN 16 -gcm256_decrypt_opt proc - push r15 - push r14 - push r13 - push r12 - push rsi - push rdi - push rbp - push rbx - pextrq rax, xmm15, 0 - push rax - pextrq rax, xmm15, 1 - push rax - pextrq rax, xmm14, 0 - push rax - pextrq rax, xmm14, 1 - push rax - pextrq rax, xmm13, 0 - push rax - pextrq rax, xmm13, 1 - push rax - pextrq rax, xmm12, 0 - push rax - pextrq rax, xmm12, 1 - push rax - pextrq rax, xmm11, 0 - push rax - pextrq rax, xmm11, 1 - push rax - pextrq rax, xmm10, 0 - push rax - pextrq rax, xmm10, 1 - push rax - pextrq rax, xmm9, 0 - push rax - pextrq rax, xmm9, 1 - push rax - pextrq rax, xmm8, 0 - push rax - pextrq rax, xmm8, 1 - push rax - pextrq rax, xmm7, 0 - push rax - pextrq rax, xmm7, 1 - push rax - pextrq rax, xmm6, 0 - push rax - pextrq rax, xmm6, 1 - push rax - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - mov rcx, r9 - mov r8, qword ptr [rsp + 264] - mov r9, qword ptr [rsp + 272] - mov rbp, qword ptr [rsp + 352] - mov r13, rcx - lea r9, qword ptr [r9 + 32] - mov rbx, qword ptr [rsp + 280] - mov rcx, rdx - imul rcx, 16 - mov r10, 579005069656919567 - pinsrq xmm9, r10, 0 - mov r10, 283686952306183 - pinsrq xmm9, r10, 1 - pxor xmm8, xmm8 - mov r11, rdi - jmp L193 -ALIGN 16 -L192: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] 
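The sequence that closes gcm128_decrypt_opt above (pcmpeqd xmm0, xmm8, then a pextrq/sub/adc pair per 64-bit half) folds the 128-bit comparison of the recomputed tag against the caller-supplied one into a scalar returned in rax, where 0 means the tags agree, without branching on the tag bytes themselves. A rough C rendering of that contract, offered as a sketch only: it assumes a little-endian 64-bit target, and tag_matches is our illustrative name, not a function in this distribution.

#include <stdint.h>
#include <string.h>

/* Mirrors the sub/adc idiom in the assembly: each 64-bit half of the
   pcmpeqd result is all-ones exactly when the corresponding tag halves
   agree, and contributes 0 to the sum in that case, so a return value
   of 0 means "tags equal". */
static uint64_t tag_matches(const uint8_t computed[16], const uint8_t expected[16])
{
  uint64_t c[2], e[2];
  memcpy(c, computed, 16);
  memcpy(e, expected, 16);
  /* (x ^ y) != 0 typically lowers to a branch-free setcc, though the C
     standard gives no hard constant-time guarantee. */
  return (uint64_t)((c[0] ^ e[0]) != 0) + (uint64_t)((c[1] ^ e[1]) != 0);
}

The caller treats any nonzero result as an authentication failure, which is why the assembly accumulates the two adc results into rax just before restoring the callee-saved registers and returning.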
- movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L193: - cmp rdx, 6 - jae L192 - cmp rdx, 0 - jbe L194 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L196 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L197 -L196: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L198 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - 
movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L200 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L202 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L203 -L202: -L203: - jmp L201 -L200: -L201: - jmp L199 -L198: -L199: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L197: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L195 -L194: -L195: - mov r15, rsi - cmp rsi, rcx - jbe L204 - movdqu xmm0, xmmword ptr [rbx + 0] - mov r10, rsi - and r10, 15 - cmp r10, 8 - jae L206 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L207 -L206: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L207: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L205 -L204: -L205: - mov rdi, qword ptr [rsp + 288] - mov rsi, qword ptr [rsp + 296] - mov rdx, qword ptr [rsp + 304] - mov rcx, r13 - movdqu xmm0, xmm9 - movdqu xmm1, xmmword ptr [r8 + 0] - movdqu xmmword ptr [rbp + 0], xmm1 - pxor xmm10, xmm10 - mov r11, 1 - pinsrq xmm10, r11, 0 - vpaddd xmm1, xmm1, xmm10 - cmp rdx, 0 - jne L208 - vpshufb xmm1, xmm1, xmm0 - movdqu xmmword ptr [rbp + 32], xmm1 - jmp L209 -L208: - movdqu xmmword ptr [rbp + 32], xmm8 - add rcx, 128 - pextrq rbx, xmm1, 0 - and rbx, 255 - vpshufb xmm1, xmm1, xmm0 - lea r14, qword ptr [rdi + 96] - movdqu xmm8, xmmword ptr [rbp + 32] - movdqu xmm7, xmmword ptr [rdi + 80] - movdqu xmm4, xmmword ptr [rdi + 64] - movdqu xmm5, xmmword ptr [rdi + 48] - movdqu xmm6, xmmword ptr [rdi + 32] - vpshufb xmm7, xmm7, xmm0 - movdqu xmm2, xmmword ptr 
[rdi + 16] - vpshufb xmm4, xmm4, xmm0 - movdqu xmm3, xmmword ptr [rdi + 0] - vpshufb xmm5, xmm5, xmm0 - movdqu xmmword ptr [rbp + 48], xmm4 - vpshufb xmm6, xmm6, xmm0 - movdqu xmmword ptr [rbp + 64], xmm5 - vpshufb xmm2, xmm2, xmm0 - movdqu xmmword ptr [rbp + 80], xmm6 - vpshufb xmm3, xmm3, xmm0 - movdqu xmmword ptr [rbp + 96], xmm2 - movdqu xmmword ptr [rbp + 112], xmm3 - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vpxor xmm4, xmm4, xmm4 - movdqu xmm15, xmmword ptr [rcx + -128] - vpaddd xmm10, xmm1, xmm2 - vpaddd xmm11, xmm10, xmm2 - vpaddd xmm12, xmm11, xmm2 - vpaddd xmm13, xmm12, xmm2 - vpaddd xmm14, xmm13, xmm2 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rbp + 16], xmm4 - cmp rdx, 6 - jne L210 - sub r14, 96 - jmp L211 -L210: -L211: - jmp L213 -ALIGN 16 -L212: - add rbx, 6 - cmp rbx, 256 - jb L214 - mov r11, 579005069656919567 - pinsrq xmm0, r11, 0 - mov r11, 283686952306183 - pinsrq xmm0, r11, 1 - vpshufb xmm6, xmm1, xmm0 - pxor xmm5, xmm5 - mov r11, 1 - pinsrq xmm5, r11, 0 - vpaddd xmm10, xmm6, xmm5 - pxor xmm5, xmm5 - mov r11, 2 - pinsrq xmm5, r11, 0 - vpaddd xmm11, xmm6, xmm5 - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd xmm12, xmm10, xmm5 - vpshufb xmm10, xmm10, xmm0 - vpaddd xmm13, xmm11, xmm5 - vpshufb xmm11, xmm11, xmm0 - vpxor xmm10, xmm10, xmm15 - vpaddd xmm14, xmm12, xmm5 - vpshufb xmm12, xmm12, xmm0 - vpxor xmm11, xmm11, xmm15 - vpaddd xmm1, xmm13, xmm5 - vpshufb xmm13, xmm13, xmm0 - vpshufb xmm14, xmm14, xmm0 - vpshufb xmm1, xmm1, xmm0 - sub rbx, 256 - jmp L215 -L214: - movdqu xmm3, xmmword ptr [r9 + -32] - vpaddd xmm1, xmm2, xmm14 - vpxor xmm10, xmm10, xmm15 - vpxor xmm11, xmm11, xmm15 -L215: - movdqu xmmword ptr [rbp + 128], xmm1 - vpclmulqdq xmm5, xmm7, xmm3, 16 - vpxor xmm12, xmm12, xmm15 - movdqu xmm2, xmmword ptr [rcx + -112] - vpclmulqdq xmm6, xmm7, xmm3, 1 - vaesenc xmm9, xmm9, xmm2 - movdqu xmm0, xmmword ptr [rbp + 48] - vpxor xmm13, xmm13, xmm15 - vpclmulqdq xmm1, xmm7, xmm3, 0 - vaesenc xmm10, xmm10, xmm2 - vpxor xmm14, xmm14, xmm15 - vpclmulqdq xmm7, xmm7, xmm3, 17 - vaesenc xmm11, xmm11, xmm2 - movdqu xmm3, xmmword ptr [r9 + -16] - vaesenc xmm12, xmm12, xmm2 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm3, 0 - vpxor xmm8, xmm8, xmm4 - vaesenc xmm13, xmm13, xmm2 - vpxor xmm4, xmm1, xmm5 - vpclmulqdq xmm1, xmm0, xmm3, 16 - vaesenc xmm14, xmm14, xmm2 - movdqu xmm15, xmmword ptr [rcx + -96] - vpclmulqdq xmm2, xmm0, xmm3, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpclmulqdq xmm3, xmm0, xmm3, 17 - movdqu xmm0, xmmword ptr [rbp + 64] - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 88] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 80] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 32], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 40], r12 - movdqu xmm5, xmmword ptr [r9 + 16] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -80] - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm7, xmm7, xmm3 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vaesenc xmm11, xmm11, xmm15 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [rbp + 80] - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -64] - vpxor xmm6, xmm6, xmm2 - vpclmulqdq xmm2, xmm0, xmm1, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor 
xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm1, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 72] - vpxor xmm7, xmm7, xmm5 - vpclmulqdq xmm5, xmm0, xmm1, 1 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 64] - vpclmulqdq xmm1, xmm0, xmm1, 17 - movdqu xmm0, xmmword ptr [rbp + 96] - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 48], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 56], r12 - vpxor xmm4, xmm4, xmm2 - movdqu xmm2, xmmword ptr [r9 + 64] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -48] - vpxor xmm6, xmm6, xmm3 - vpclmulqdq xmm3, xmm0, xmm2, 0 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm0, xmm2, 16 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 56] - vpxor xmm7, xmm7, xmm1 - vpclmulqdq xmm1, xmm0, xmm2, 1 - vpxor xmm8, xmm8, xmmword ptr [rbp + 112] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 48] - vpclmulqdq xmm2, xmm0, xmm2, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 64], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 72], r12 - vpxor xmm4, xmm4, xmm3 - movdqu xmm3, xmmword ptr [r9 + 80] - vaesenc xmm14, xmm14, xmm15 - movdqu xmm15, xmmword ptr [rcx + -32] - vpxor xmm6, xmm6, xmm5 - vpclmulqdq xmm5, xmm8, xmm3, 16 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm6, xmm6, xmm1 - vpclmulqdq xmm1, xmm8, xmm3, 1 - vaesenc xmm10, xmm10, xmm15 - movbe r13, qword ptr [r14 + 40] - vpxor xmm7, xmm7, xmm2 - vpclmulqdq xmm2, xmm8, xmm3, 0 - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 32] - vpclmulqdq xmm8, xmm8, xmm3, 17 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 80], r13 - vaesenc xmm13, xmm13, xmm15 - mov qword ptr [rbp + 88], r12 - vpxor xmm6, xmm6, xmm5 - vaesenc xmm14, xmm14, xmm15 - vpxor xmm6, xmm6, xmm1 - movdqu xmm15, xmmword ptr [rcx + -16] - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm2 - pxor xmm3, xmm3 - mov r11, 13979173243358019584 - pinsrq xmm3, r11, 1 - vaesenc xmm9, xmm9, xmm15 - vpxor xmm7, xmm7, xmm8 - vaesenc xmm10, xmm10, xmm15 - vpxor xmm4, xmm4, xmm5 - movbe r13, qword ptr [r14 + 24] - vaesenc xmm11, xmm11, xmm15 - movbe r12, qword ptr [r14 + 16] - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - mov qword ptr [rbp + 96], r13 - vaesenc xmm12, xmm12, xmm15 - mov qword ptr [rbp + 104], r12 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - movdqu xmm1, xmmword ptr [rcx + 0] - vaesenc xmm9, xmm9, xmm1 - movdqu xmm15, xmmword ptr [rcx + 16] - vaesenc xmm10, xmm10, xmm1 - vpsrldq xmm6, xmm6, 8 - vaesenc xmm11, xmm11, xmm1 - vpxor xmm7, xmm7, xmm6 - vaesenc xmm12, xmm12, xmm1 - vpxor xmm4, xmm4, xmm0 - movbe r13, qword ptr [r14 + 8] - vaesenc xmm13, xmm13, xmm1 - movbe r12, qword ptr [r14 + 0] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 32] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - vaesenc xmm9, xmm9, xmm1 - vaesenc xmm10, xmm10, xmm1 - vaesenc xmm11, xmm11, xmm1 - vaesenc xmm12, xmm12, xmm1 - vaesenc xmm13, xmm13, xmm1 - movdqu xmm15, xmmword ptr [rcx + 48] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 64] - vaesenc xmm9, xmm9, xmm15 - vaesenc xmm10, xmm10, xmm15 - vaesenc xmm11, xmm11, xmm15 - vaesenc xmm12, xmm12, xmm15 - vaesenc xmm13, xmm13, xmm15 - vaesenc xmm14, xmm14, xmm15 - vaesenc xmm9, xmm9, xmm1 - vaesenc xmm10, xmm10, xmm1 - vaesenc xmm11, xmm11, xmm1 - vaesenc xmm12, xmm12, xmm1 - vaesenc xmm13, xmm13, 
xmm1 - movdqu xmm15, xmmword ptr [rcx + 80] - vaesenc xmm14, xmm14, xmm1 - movdqu xmm1, xmmword ptr [rcx + 96] - vaesenc xmm9, xmm9, xmm15 - movdqu xmmword ptr [rbp + 16], xmm7 - vpalignr xmm8, xmm4, xmm4, 8 - vaesenc xmm10, xmm10, xmm15 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm2, xmm1, xmmword ptr [rdi + 0] - vaesenc xmm11, xmm11, xmm15 - vpxor xmm0, xmm1, xmmword ptr [rdi + 16] - vaesenc xmm12, xmm12, xmm15 - vpxor xmm5, xmm1, xmmword ptr [rdi + 32] - vaesenc xmm13, xmm13, xmm15 - vpxor xmm6, xmm1, xmmword ptr [rdi + 48] - vaesenc xmm14, xmm14, xmm15 - vpxor xmm7, xmm1, xmmword ptr [rdi + 64] - vpxor xmm3, xmm1, xmmword ptr [rdi + 80] - movdqu xmm1, xmmword ptr [rbp + 128] - vaesenclast xmm9, xmm9, xmm2 - pxor xmm2, xmm2 - mov r11, 72057594037927936 - pinsrq xmm2, r11, 1 - vaesenclast xmm10, xmm10, xmm0 - vpaddd xmm0, xmm1, xmm2 - mov qword ptr [rbp + 112], r13 - lea rdi, qword ptr [rdi + 96] - vaesenclast xmm11, xmm11, xmm5 - vpaddd xmm5, xmm0, xmm2 - mov qword ptr [rbp + 120], r12 - lea rsi, qword ptr [rsi + 96] - movdqu xmm15, xmmword ptr [rcx + -128] - vaesenclast xmm12, xmm12, xmm6 - vpaddd xmm6, xmm5, xmm2 - vaesenclast xmm13, xmm13, xmm7 - vpaddd xmm7, xmm6, xmm2 - vaesenclast xmm14, xmm14, xmm3 - vpaddd xmm3, xmm7, xmm2 - sub rdx, 6 - cmp rdx, 6 - jbe L216 - add r14, 96 - jmp L217 -L216: -L217: - cmp rdx, 0 - jbe L218 - movdqu xmmword ptr [rsi + -96], xmm9 - vpxor xmm9, xmm1, xmm15 - movdqu xmmword ptr [rsi + -80], xmm10 - movdqu xmm10, xmm0 - movdqu xmmword ptr [rsi + -64], xmm11 - movdqu xmm11, xmm5 - movdqu xmmword ptr [rsi + -48], xmm12 - movdqu xmm12, xmm6 - movdqu xmmword ptr [rsi + -32], xmm13 - movdqu xmm13, xmm7 - movdqu xmmword ptr [rsi + -16], xmm14 - movdqu xmm14, xmm3 - movdqu xmm7, xmmword ptr [rbp + 32] - jmp L219 -L218: - vpxor xmm8, xmm8, xmmword ptr [rbp + 16] - vpxor xmm8, xmm8, xmm4 -L219: -ALIGN 16 -L213: - cmp rdx, 0 - ja L212 - movdqu xmmword ptr [rbp + 32], xmm1 - movdqu xmmword ptr [rsi + -96], xmm9 - movdqu xmmword ptr [rsi + -80], xmm10 - movdqu xmmword ptr [rsi + -64], xmm11 - movdqu xmmword ptr [rsi + -48], xmm12 - movdqu xmmword ptr [rsi + -32], xmm13 - movdqu xmmword ptr [rsi + -16], xmm14 - sub rcx, 128 -L209: - movdqu xmm11, xmmword ptr [rbp + 32] - mov r8, rcx - mov rax, qword ptr [rsp + 312] - mov rdi, qword ptr [rsp + 320] - mov rdx, qword ptr [rsp + 328] - mov r14, rdx - mov r12, 579005069656919567 - pinsrq xmm9, r12, 0 - mov r12, 283686952306183 - pinsrq xmm9, r12, 1 - pshufb xmm11, xmm9 - mov rbx, rdi - mov r12, rdx - mov rdi, rax - mov r11, rdi - jmp L221 -ALIGN 16 -L220: - add r11, 80 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] 
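The vpclmulqdq/vpalignr blocks threaded through these loops are the standard GHASH multiply-and-reduce over GF(2^128): four carry-less multiplies build a 256-bit product, and two folds against the reflected constant 0xC200000000000000 (loaded above by pinsrd of 3254779904 into the top dword) reduce it modulo x^128 + x^7 + x^2 + x + 1. The following intrinsics sketch of one such step follows the well-known reflected reduction rather than the verified Vale code; it assumes a GCC-like compiler with -mpclmul, and ghash_mul is our name.

#include <emmintrin.h>  /* SSE2 shifts, shuffles, xor */
#include <wmmintrin.h>  /* _mm_clmulepi64_si128 */

static __m128i ghash_mul(__m128i x, __m128i h)
{
  /* Schoolbook carry-less multiply, as in the asm: immediates 0x00, 0x10,
     0x01, 0x11 select which 64-bit halves of each operand are multiplied. */
  __m128i lo  = _mm_clmulepi64_si128(x, h, 0x00);
  __m128i mid = _mm_xor_si128(_mm_clmulepi64_si128(x, h, 0x10),
                              _mm_clmulepi64_si128(x, h, 0x01));
  __m128i hi  = _mm_clmulepi64_si128(x, h, 0x11);
  lo = _mm_xor_si128(lo, _mm_slli_si128(mid, 8));  /* vpslldq ..., 8 */
  hi = _mm_xor_si128(hi, _mm_srli_si128(mid, 8));  /* vpsrldq ..., 8 */

  /* Two-fold reduction with the reflected polynomial constant; each round
     is the vpalignr(..., 8) half-swap plus one more carry-less multiply. */
  const __m128i poly = _mm_set_epi64x((long long)0xC200000000000000ULL, 0);
  __m128i t = _mm_clmulepi64_si128(lo, poly, 0x10);
  lo = _mm_xor_si128(_mm_shuffle_epi32(lo, 0x4E), t);
  t  = _mm_clmulepi64_si128(lo, poly, 0x10);
  lo = _mm_xor_si128(_mm_shuffle_epi32(lo, 0x4E), t);
  return _mm_xor_si128(lo, hi);
}

The unrolled bodies above interleave six of these GHASH steps with the vaesenc rounds of six counter blocks, so the PCLMUL and AES execution units run in parallel; that interleaving is the point of the 6-block loop structure.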
- pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 80] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - add r11, 96 - sub rdx, 6 -ALIGN 16 -L221: - cmp rdx, 6 - jae L220 - cmp rdx, 0 - jbe L222 - mov r10, rdx - sub r10, 1 - imul r10, 16 - add r11, r10 - movdqu xmm5, xmmword ptr [r9 + -32] - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - cmp rdx, 1 - jne L224 - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - jmp L225 -L224: - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - movdqu xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + -16] - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 2 - je L226 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 16] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 3 - je L228 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 32] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - cmp rdx, 4 - je L230 - sub r11, 16 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm0, xmmword ptr [r11 + 0] - pshufb xmm0, xmm9 - vpxor xmm4, xmm4, xmm1 - movdqu xmm1, xmmword ptr [r9 + 64] - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 - movdqu xmm5, xmm1 - jmp L231 -L230: -L231: - jmp 
L229 -L228: -L229: - jmp L227 -L226: -L227: - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - vpxor xmm4, xmm4, xmm1 - vpxor xmm6, xmm6, xmm2 - vpxor xmm6, xmm6, xmm3 - vpxor xmm7, xmm7, xmm5 -L225: - pxor xmm3, xmm3 - mov r10, 3254779904 - pinsrd xmm3, r10d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - jmp L223 -L222: -L223: - mov rdi, rbx - mov rdx, r12 - pxor xmm10, xmm10 - mov rbx, 1 - pinsrd xmm10, ebx, 0 - mov r11, rax - mov r10, rdi - mov rbx, 0 - jmp L233 -ALIGN 16 -L232: - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - movdqu xmm2, xmmword ptr [r11 + 0] - pxor xmm2, xmm0 - movdqu xmmword ptr [r10 + 0], xmm2 - add rbx, 1 - add r11, 16 - add r10, 16 - paddd xmm11, xmm10 -ALIGN 16 -L233: - cmp rbx, rdx - jne L232 - add r14, qword ptr [rsp + 304] - imul r14, 16 - mov r13, qword ptr [rsp + 344] - cmp r13, r14 - jbe L234 - mov rax, qword ptr [rsp + 336] - mov r10, r13 - and r10, 15 - movdqu xmm0, xmmword ptr [rax + 0] - movdqu xmm10, xmm0 - cmp r10, 8 - jae L236 - mov rcx, 0 - pinsrq xmm0, rcx, 1 - mov rcx, r10 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 0 - and rcx, r11 - pinsrq xmm0, rcx, 0 - jmp L237 -L236: - mov rcx, r10 - sub rcx, 8 - shl rcx, 3 - mov r11, 1 - shl r11, cl - sub r11, 1 - pextrq rcx, xmm0, 1 - and rcx, r11 - pinsrq xmm0, rcx, 1 -L237: - pshufb xmm0, xmm9 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmm0, xmm11 - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr 
[r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pxor xmm10, xmm0 - movdqu xmmword ptr [rax + 0], xmm10 - jmp L235 -L234: -L235: - mov r11, r15 - pxor xmm0, xmm0 - mov rax, r11 - imul rax, 8 - pinsrq xmm0, rax, 1 - mov rax, r13 - imul rax, 8 - pinsrq xmm0, rax, 0 - movdqu xmm5, xmmword ptr [r9 + -32] - vpxor xmm0, xmm8, xmm0 - vpclmulqdq xmm1, xmm0, xmm5, 0 - vpclmulqdq xmm2, xmm0, xmm5, 16 - vpclmulqdq xmm3, xmm0, xmm5, 1 - vpclmulqdq xmm5, xmm0, xmm5, 17 - movdqu xmm4, xmm1 - vpxor xmm6, xmm2, xmm3 - movdqu xmm7, xmm5 - pxor xmm3, xmm3 - mov r11, 3254779904 - pinsrd xmm3, r11d, 3 - vpslldq xmm5, xmm6, 8 - vpxor xmm4, xmm4, xmm5 - vpalignr xmm0, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpsrldq xmm6, xmm6, 8 - vpxor xmm7, xmm7, xmm6 - vpxor xmm4, xmm4, xmm0 - vpalignr xmm8, xmm4, xmm4, 8 - vpclmulqdq xmm4, xmm4, xmm3, 16 - vpxor xmm8, xmm8, xmm7 - vpxor xmm8, xmm8, xmm4 - movdqu xmm0, xmmword ptr [rbp + 0] - pshufb xmm0, xmm9 - movdqu xmm2, xmmword ptr [r8 + 0] - pxor xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 16] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 32] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 48] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 64] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 80] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 96] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 112] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 128] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 144] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 160] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 176] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 192] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 208] - aesenc xmm0, xmm2 - movdqu xmm2, xmmword ptr [r8 + 224] - aesenclast xmm0, xmm2 - pxor xmm2, xmm2 - pshufb xmm8, xmm9 - pxor xmm8, xmm0 - mov r15, qword ptr [rsp + 360] - movdqu xmm0, xmmword ptr [r15 + 0] - pcmpeqd xmm0, xmm8 - pextrq rdx, xmm0, 0 - sub rdx, 18446744073709551615 - mov rax, 0 - adc rax, 0 - pextrq rdx, xmm0, 1 - sub rdx, 18446744073709551615 - mov rdx, 0 - adc rdx, 0 - add rax, rdx - mov rcx, rax - pop rax - pinsrq xmm6, rax, 1 - pop rax - pinsrq xmm6, rax, 0 - pop rax - pinsrq xmm7, rax, 1 - pop rax - pinsrq xmm7, rax, 0 - pop rax - pinsrq xmm8, rax, 1 - pop rax - pinsrq xmm8, rax, 0 - pop rax - pinsrq xmm9, rax, 1 - pop rax - pinsrq xmm9, rax, 0 - pop rax - pinsrq xmm10, rax, 1 - pop rax - pinsrq xmm10, rax, 0 - pop rax - pinsrq xmm11, rax, 1 - pop rax - pinsrq xmm11, rax, 0 - pop rax - pinsrq xmm12, rax, 1 - pop rax - pinsrq xmm12, rax, 0 - pop rax - pinsrq xmm13, rax, 1 - pop rax - pinsrq xmm13, rax, 0 - pop rax - pinsrq xmm14, rax, 1 - pop rax - pinsrq xmm14, rax, 0 - pop rax - pinsrq xmm15, rax, 1 - pop rax - pinsrq xmm15, rax, 0 - pop rbx - pop rbp - pop rdi - pop rsi - pop r12 - pop r13 - pop r14 - pop r15 - mov rax, rcx - ret -gcm256_decrypt_opt endp -end diff --git a/dist/c89-compatible/configure b/dist/c89-compatible/configure deleted file mode 
100755 index 4219b92d9a..0000000000 --- a/dist/c89-compatible/configure +++ /dev/null @@ -1,534 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail -set -e - -if ! [ -f libintvector.h ]; then - echo "This script is intended to be run from dist/foo." - exit 1 -fi - -if [[ $CC == "" ]]; then - CC=cc -fi - -my_mktemp_dir () { - local name=$1 - # Note: --suffix is not POSIX, doesn't work on OSX. - # Also on OSX, the XXXXXXX pattern cannot be followed by anything else. - local dir=$(mktemp -d /tmp/$name-XXXXXXX) - if [[ $dir == "" ]]; then - # Note: -p doesn't work on OSX, all tests will fail in case there is no - # permission to write to /tmp - local dir=$(mktemp -d -p . $name-XXXXXXX) - fi - echo $dir -} - -my_mktemp_c () { - local name=$1 - local dir=$(my_mktemp_dir $name) - local file=$dir/$name.c - echo $file -} - -# Helpers to help do feature or bug detection -# ------------------------------------------- - -detect_ocamlfind () { - package=$1 - loc=$(ocamlfind query $package) - r=$? - if [[ $r == 0 ]]; then - echo "... found $package in $loc" - else - echo "OCaml package $package not found" - fi - return $r -} - -detect_ocaml () { - # Detect ocamlfind - loc=$(which ocamlfind) - r=$? - if [[ $r == 0 ]]; then - echo "... found ocamlfind in $loc" - else - echo "ocamlfind not found" - return 1 - fi - # Detect packages - for p in ctypes; do - detect_ocamlfind $p || return 1 - done -} - -check_no_bug81300 () { - if [[ "$cross_build" == "1" ]]; then - # We don't want to build anything with march=native - return 0 - fi - # Perform the check only if lib_intrinsics.h is present. In practice, - # lib_intrinsics.h is present in all the directories which contain libintvector.h, - # but mozilla. If lib_intrinsics.h is not present, assume there is no bug and - # return success. - if [ -f lib_intrinsics.h ]; then - local dir=$(my_mktemp_dir testbug81300) - local file=$dir/testbug81300.c - local out=$dir/testbug81300.out - cat > $file < -#include -#include -#include -uint64_t add4_variables(uint64_t *x, uint64_t y0) { - uint64_t *r2 = x + 2; - uint64_t *r3 = x + 3; - uint64_t cc = Lib_IntTypes_Intrinsics_add_carry_u64(0, x[0], y0, x); - uint64_t cc1 = Lib_IntTypes_Intrinsics_add_carry_u64(cc, 1, 0, x); - uint64_t cc2 = Lib_IntTypes_Intrinsics_add_carry_u64(1, 0, 0, r2); - uint64_t cc3 = Lib_IntTypes_Intrinsics_add_carry_u64(cc2, x[3], y0, r3); - return cc3; -} -uint64_t sub4(uint64_t *x, uint64_t *y, uint64_t *result) { - uint64_t *r3 = result + 3; - uint64_t cc3 = Lib_IntTypes_Intrinsics_sub_borrow_u64(1, x[3], y[3], r3); - return cc3; -} -void p256_sub(uint64_t *arg1, uint64_t *arg2, uint64_t *out) { - uint64_t t = sub4(arg1, arg2, out); - uint64_t c = add4_variables(out, t); - (void)c; -} -int main() { - uint64_t *a = (uint64_t *) malloc(sizeof(uint64_t) * 4); - memset(a, 0, 32); - uint64_t *b = (uint64_t *) malloc(sizeof(uint64_t) * 4); - memset(b, 0, 32); - uint64_t *c = (uint64_t *) malloc(sizeof(uint64_t) * 4); - memset(c, 0, 32); - a[3] = 16561854653415423667ul; - b[3] = 16275405352713846784ul; - p256_sub(a, b, c); - printf("result == %"PRIu64" \n", c[3]); - return 0; -} -EOF - $CC $CROSS_CFLAGS -I. -I../karamel/include -I../karamel/krmllib/dist/minimal -DHACL_CAN_COMPILE_INTRINSICS -march=native -O3 $file -o $out && $out > $out.test - $CC $CROSS_CFLAGS -I. -I../karamel/include -I../karamel/krmllib/dist/minimal -march=native -O3 $file -o $out && $out > $out.ref - diff $out.test $out.ref - else - echo "... 
WARNING: lib_intrinsics.h is missing: we can't test whether bug 81300 is present or not and assume it is not." - return 0 - fi -} - -detect_uint128 () { - local file=$(my_mktemp_c testint128) - echo "unsigned __int128 x = 0;" > $file - $CC $CROSS_CFLAGS -c $file -o /dev/null -} - -detect_broken_xcode () { - # For posterity, here are a list of things that don't work - # - checking if cc will compile curve25519-inline.h - # - checking if cc will compile fsqr2 as is - # - checking if cc will compile fsqr2 marked as extern - # - checking if cc will compile fsqr2 as is with an extern caller in the file to - # prevent it from being eliminated - # - # In short, I couldn't figure out a minimal testcase for the error: - # ./curve25519-inline.h:595:5: error: inline assembly requires more registers than available - # - # Furthermore, this error only seems to happen on the exact config Travis - # uses. - # - Installing XCode 10.0 (for MacOS 10.14, Catalina) does not give me the right LLVM build - # - Installing XCode 10.1 (for MacOS 10.14, Catalina) does not give me the right LLVM build - # - Installing XCode 10.3 (for MacOS 10.14, Catalina) bails because my OSX is too recent - $CC --version | grep -q clang-1000.11.45.5 -} - -check_explicit_bzero () { - local file=$(my_mktemp_c testbzero) - cat > $file < - -int main () { - unsigned char *block[32] = { 0 }; - explicit_bzero(block, 32); - return 0; -} -EOF - $CC $CROSS_CFLAGS -Werror -c $file -o /dev/null -} - -# Target platform detection -# ------------------------- - -detect_x64 () { - [[ $target_arch == "x86_64" ]] || [[ $target_arch == "amd64" ]] -} - -detect_x86 () { - [[ $target_arch == "i386" ]] || [[ $target_arch == "i686" ]] || \ - [[ $target_arch == "i86pc" ]] || [[ $target_arch == "ia32" ]] -} - -detect_arm () { - # On Raspberry pi, uname -m is armv6l or armv7l so we need to cut! - [[ $(echo $target_arch | cut -c 1-3) == "arm" ]] || \ - [[ $target_arch == "aarch64" ]] -} - -detect_arm_neon () { - local file=$(my_mktemp_c testvec128) - cat > $file < - -int main () { - uint8_t block[32] = { 0 }; - Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load32_le(block); - Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load32_le(block + 16); - Lib_IntVector_Intrinsics_vec128 test = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2); - return 0; -} -EOF - if [[ $target_arch == "aarch64" ]] && [[ $target_abi == "darwin" ]]; then - $CC $CROSS_CFLAGS -DHACL_CAN_COMPILE_VEC128 -I. -c $file -o /dev/null - else - $CC $CROSS_CFLAGS -DHACL_CAN_COMPILE_VEC128 -I. -march=armv8-a+simd -c $file -o /dev/null - fi -} - -# We only detect the 64-bit version of the SystemZ architecture (s390x). -detect_systemz () { - [[ $target_arch == "s390x" ]] -} - -# We only detect the 64-bit version of the PowerPC architecture -detect_powerpc () { - [[ $target_arch == "ppc64le" ]] -} - - -# Usage info -# ---------- - -show_help () { - printf "\nUsage: configure -target \n\n" - printf "This script configures HACL/Evercrypt. You can specify the following options:\n\n" - printf " -target Specify the target triple for the build. 
This follows the\n" - printf " Clang target triple convention.\n" - printf " Details: https://clang.llvm.org/docs/CrossCompilation.html\n" - printf " Currently supported triples are:\n" - printf " * aarch64-none-linux-android\n" - printf " * aarch64-none-linux-gnu\n" - printf " * aarch64-apple-darwin\n" - printf " * aarch64-apple-ios\n" - printf " * x86_64-apple-ios-simulator\n" - printf " --disable-bzero Do not use explicit_bzero (binary will work with an old GLIBC)\n" - printf " --disable-ocaml Disable OCaml bindings\n" - printf " --enable-power9 Enable Power ISA v3.0 instruction set for PowerPC architecture\n" - printf "\n" -} - -# Main auto-detection - -echo -n > Makefile.config -echo -n > config.h - -# Default arguments -disable_ocaml=0 -disable_bzero=0 -enable_power9=0 -cross_build=0 - -# Parse command line arguments. -all_args=("$@") -while [ $# -gt 0 ]; do - case "$1" in - -target) build_target="$2"; shift ;; - --disable-ocaml) disable_ocaml=1 ;; - --disable-bzero) disable_bzero=1 ;; - --enable-power9) enable_power9=1 ;; - --help) show_help; exit 0 ;; - *) show_help; exit 2 ;; - esac - shift -done - -# We need to have the following to decide what to build and what not. -# * build_target: the llvm target triplet, e.g. armv7a-none-eabi -# This is being decomposed into the following information. -# * target_arch: the target architecture, e.g. aarch64, arm, x86_64, i386 -# * target_sys: the target system, e.g. none, linux, win32, darwin -# * target_abi: the target abi, e.g. eabi, gnu, android, macho - -# If the user provided a build target via a command-line option, fill -# target_{arch,sys,abi} accordingly; otherwise, query the host via uname for -# these parameters and assume we don't do cross-compilation. -if [ ! -z "$build_target" ]; then - cross_build=1 - echo "Doing cross build for $build_target. Make sure that your toolchain is configured properly." - if [[ "$build_target" == "aarch64-none-linux-android" ]]; then - # NOTE: We assume that the Android environment is set up correctly. - # Make sure that `crtbegin_so.o` and `crtend_so.o` can be found. - - if [ -z $NDK ]; then - echo "Compiling for aarch64-none-linux-android requires an NDK toolchain." - echo "Then please set at least NDK appropriately. SYSROOT and NDK_TOOLCHAIN might be necessary as well." - echo "Get your toolchain here: https://developer.android.com/ndk/downloads/" - exit 1 - fi - if [[ $SYSROOT == "" ]]; then - SYSROOT="$NDK/toolchains/llvm/prebuilt/darwin-x86_64/sysroot" - fi - if [[ $NDK_TOOLCHAIN == "" ]]; then - NDK_TOOLCHAIN="$NDK/toolchains/llvm/prebuilt/darwin-x86_64" - fi - - target_arch="aarch64" - target_sys="Linux" - target_abi="android" - CROSS_TARGET="-target $build_target" - CROSS_SYSROOT="--sysroot=$SYSROOT" - CROSS_INCLUDES="-I$SYSROOT/usr/include/" - - # Set compiler - CC="$NDK/toolchains/llvm/prebuilt/darwin-x86_64/bin/clang" - echo "CC=$CC" >> Makefile.config - elif [[ "$build_target" == "aarch64-none-linux-gnu" ]]; then - if [ -z $TOOLCHAIN ]; then - echo "Compiling for aarch64-none-linux-gnu-gcc requires a GCC toolchain." - echo "This probably only works on Windows and Linux." - echo "Get your toolchain here: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads" - echo "And then set TOOLCHAIN appropriately."
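Stepping back to the probes earlier in this script: detect_uint128 compiles a one-line file declaring an unsigned __int128 to decide whether the toolchain has a native 128-bit type; when the probe fails, the script appends -DKRML_VERIFIED_UINT128 to CFLAGS further down and the build uses the verified fallback instead. The sketch below illustrates what is at stake, assuming only C99 plus the conventional GCC/Clang __SIZEOF_INT128__ macro; u128 and mul64x64 are illustrative names, not part of the distribution.

#include <stdint.h>

#if defined(__SIZEOF_INT128__)
typedef unsigned __int128 u128;
/* Native path: one widening multiply, typically a single mul/mulx. */
static u128 mul64x64(uint64_t a, uint64_t b) { return (u128)a * b; }
#else
/* Portable fallback in the spirit of the verified uint128: split each
   operand into 32-bit halves and recombine the four partial products. */
typedef struct { uint64_t lo, hi; } u128;
static u128 mul64x64(uint64_t a, uint64_t b)
{
  uint64_t a0 = (uint32_t)a, a1 = a >> 32, b0 = (uint32_t)b, b1 = b >> 32;
  uint64_t p00 = a0 * b0, p01 = a0 * b1, p10 = a1 * b0, p11 = a1 * b1;
  uint64_t mid = (p00 >> 32) + (uint32_t)p01 + (uint32_t)p10;
  u128 r = { (mid << 32) | (uint32_t)p00,
             p11 + (p01 >> 32) + (p10 >> 32) + (mid >> 32) };
  return r;
}
#endif

The fallback costs four multiplies plus recombination where the native type costs one, which is why the script prefers to detect __int128 rather than unconditionally defining KRML_VERIFIED_UINT128.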
- exit 1 - fi - - target_arch="aarch64" - target_sys="Linux" - target_abi="gnu" - - # Set compiler - CC="$TOOLCHAIN/bin/aarch64-none-linux-gnu-gcc" - echo "CC=$CC" >> Makefile.config - # Using GCC here doesn't require setting anything else. - elif [[ "$build_target" == "ia32" ]]; then - # Cross compiling for 32-bit Intel (presumably from x64 Intel). - CROSS_TARGET="-m32" - CROSS_LDLAGS="-m32" - target_arch="ia32" - target_sys=$(uname) - target_abi="native" - echo "MARCH = ia32" >> Makefile.config - elif [[ "$build_target" == "aarch64-apple-darwin" ]]; then - # Cross compiling for aarch64-apple-darwin (presumably on x86_64-apple-darwin). - target_arch="aarch64" - target_sys="apple" - target_abi="darwin" - # See https://developer.apple.com/documentation/apple-silicon/building-a-universal-macos-binary - CROSS_TARGET="-target arm64-apple-macos11" - elif [[ "$build_target" == "aarch64-apple-ios" ]]; then - # Cross compiling for aarch64-apple-ios - # (presumably on x86_64-apple-darwin or aarch64-apple-darwin). - target_arch="aarch64" - target_sys="apple" - target_abi="ios" - CROSS_TARGET="-target arm64-apple-ios13.0" - CROSS_SYSROOT="-isysroot $(xcrun --sdk iphoneos --show-sdk-path) -arch arm64" - # CROSS_INCLUDES="-I$SYSROOT/usr/include/" - elif [[ "$build_target" == "x86_64-apple-ios-simulator" ]]; then - # Cross compiling for x86_64-apple-ios (emulator only) - # (presumably on x86_64-apple-darwin). - target_arch="x86_64" - target_sys="apple" - target_abi="ios-simulator" - CROSS_TARGET="-target x86_64-apple-ios-simulator -mios-simulator-version-min=13.0" - CROSS_SYSROOT="-isysroot $(xcrun --sdk iphonesimulator --show-sdk-path)" - else - show_help - exit 0 - fi - CROSS_CFLAGS="$CROSS_TARGET $CROSS_SYSROOT $CROSS_INCLUDES" - echo "CFLAGS += $CROSS_CFLAGS" >> Makefile.config - echo "LDFLAGS += $CROSS_LDLAGS" >> Makefile.config -else - target_arch=$(uname -m) - target_sys=$(uname) - target_abi="native" -fi - -if [[ $target_arch == "aarch64" ]]; then - # Set UNAME and MARCH when cross-compiling - echo "UNAME = Linux" >> Makefile.config - echo "MARCH = aarch64" >> Makefile.config -fi - -compile_vec128=false -compile_vec256=false -compile_vale=false -compile_inline_asm=false -compile_intrinsics=false - -echo "CFLAGS ?=" >> Makefile.config - -if detect_arm; then - echo "... detected ARM platform" - echo "TARGET_ARCHITECTURE = ARM" >> Makefile.config - if detect_arm_neon; then - echo "#define TARGET_ARCHITECTURE TARGET_ARCHITECTURE_ID_ARM8" >> config.h - echo "... $build_target supports 128-bit NEON" - compile_vec128=true - echo "... $CC $CROSS_CFLAGS can cross-compile to ARM64 with SIMD" - echo "CFLAGS_128 = -march=armv8-a+simd" >> Makefile.config - else - echo "#define TARGET_ARCHITECTURE TARGET_ARCHITECTURE_ID_ARM7" >> config.h - fi -fi - -if detect_x86; then - # Just print information: none of the above build options are supported on x86 - echo "... detected x86 platform" - echo "TARGET_ARCHITECTURE = x86" >> Makefile.config - echo "#define TARGET_ARCHITECTURE TARGET_ARCHITECTURE_ID_X86" >> config.h - # TODO: can probably detect intrinsics here too and try to enable them! -fi - -if detect_x64; then - echo "... detected x64 platform" - echo "TARGET_ARCHITECTURE = x64" >> Makefile.config - echo "#define TARGET_ARCHITECTURE TARGET_ARCHITECTURE_ID_X64" >> config.h - echo "... 
$build_target supports compilation of 128-bit and 256-bit AVX/AVX2" - compile_vec128=true - echo "CFLAGS_128 = -mavx" >> Makefile.config - compile_vec256=true - echo "CFLAGS_256 = -mavx -mavx2" >> Makefile.config - # x64 always supports Vale -- this configure script assumes a GCC-like - # compiler, meaning that in theory inline assembly should work (rather than - # the external linking) BUT some versions of xcode are irremediably broken and - # fail with a register allocator error - # Note: MSVC compilers don't support inline GCC assembly and are expected to - # use their own build system - echo "... $build_target supports vale" - compile_vale=true - if detect_broken_xcode; then - echo "found broken XCode version, known to refuse to compile our inline ASM, disabling " - else - echo "... not using known buggy Apple LLVM build" - echo "... $build_target supports our inline ASM" - compile_inline_asm=true - fi -fi - -if detect_x86 || detect_x64; then - if ! check_no_bug81300; then - echo "found broken GCC < 5.5 with bug 81300, disabling subborrow + addcarry" - else - compile_intrinsics=true - echo "... using a non-buggy GCC" - fi -fi - -if detect_systemz; then - echo "... detected z platform" - echo "TARGET_ARCHITECTURE = SystemZ" >> Makefile.config - echo "#define TARGET_ARCHITECTURE TARGET_ARCHITECTURE_ID_SYSTEMZ" >> config.h - echo "... $build_target supports 128-bit z VSX" - compile_vec128=true - echo "CFLAGS_128 = -m64 -mzarch -mvx -mzvector -march=native" >> Makefile.config - # In the case of IBMz, some of the vectorized functions are defined as - # inline static rather than as macros, meaning we need to compile all - # the files with the vector compilation options. Note that this is not - # a problem because we don't do cross-compilation for IBMz. Also note - # that we don't have support for vec256 for IBMz. - echo "CFLAGS += \$(CFLAGS_128)" >> Makefile.config -fi - -# TODO: try to enable the intrinsics -if detect_powerpc; then - echo "... detected PowerPC platform" - echo "... $build_target supports 128-bit Power VSX" - echo "TARGET_ARCHITECTURE = PowerPC64" >> Makefile.config - echo "#define TARGET_ARCHITECTURE TARGET_ARCHITECTURE_ID_POWERPC64" >> config.h - compile_vec128=true - if [[ "$enable_power9" == "1" ]]; then - echo "... enable Power ISA v3.0 instruction set" - echo "CFLAGS_128 = -mcpu=power9" >> Makefile.config - fi -fi - -if $compile_intrinsics; then - echo "$build_target supports _addcarry_u64" - echo "#define HACL_CAN_COMPILE_INTRINSICS 1" >> config.h -fi - -if ! $compile_vale; then - # All reference to Vale symbols are properly guarded in the EverCrypt layer by - # ifdef TARGET_X64 -- with the exception of Curve25519, which needs to be - # disabled by the build system since it contains unguarded references to Vale - # symbols. - echo "$build_target does not support x64 assembly, disabling Curve64" - echo "BLACKLIST += Hacl_Curve25519_64.c $(ls Hacl_HPKE_Curve64_*.c | xargs)" >> Makefile.config - echo "$build_target does not support legacy vale stubs" - echo "BLACKLIST += evercrypt_vale_stubs.c" >> Makefile.config -else - echo "#define HACL_CAN_COMPILE_VALE 1" >> config.h -fi - -if $compile_inline_asm; then - echo "#define HACL_CAN_COMPILE_INLINE_ASM 1" >> config.h -fi - -if ! 
$compile_vec128; then - echo "$CC $CROSS_CFLAGS cannot compile 128-bit vector arithmetic, disabling" - echo "BLACKLIST += $(ls *CP128*.c *_128.c *_Vec128.c | xargs)" >> Makefile.config - echo "#define Lib_IntVector_Intrinsics_vec128 void *" >> config.h -else - echo "#define HACL_CAN_COMPILE_VEC128 1" >> config.h -fi - -if ! $compile_vec256; then - echo "$build_target does not support 256-bit arithmetic" - echo "BLACKLIST += $(ls *CP256*.c *_256.c *_Vec256.c | xargs)" >> Makefile.config - echo "#define Lib_IntVector_Intrinsics_vec256 void *" >> config.h -else - echo "#define HACL_CAN_COMPILE_VEC256 1" >> config.h -fi - -if ! detect_uint128; then - # Explicitly not supporting compilation with MSVC, which would entail not - # defining KRML_VERIFIED_UINT128. - echo "$CC $CROSS_CFLAGS does not support unsigned __int128 -- using a fallback verified implementation" - echo "CFLAGS += -DKRML_VERIFIED_UINT128" >> Makefile.config -else - echo "#define HACL_CAN_COMPILE_UINT128 1" >> config.h -fi - -if [[ "$disable_ocaml" == "1" ]] || ! detect_ocaml; then - echo "OCaml bindings disabled" - echo "DISABLE_OCAML_BINDINGS=1" >> Makefile.config -fi - -if [[ $target_sys == "Linux" ]]; then - if [[ "$disable_bzero" == "1" ]]; then - echo "disabling the use of explicit_bzero" - echo "#define LINUX_NO_EXPLICIT_BZERO" >> config.h - elif check_explicit_bzero; then - echo "... glibc is recent enough for explicit_bzero" - else - echo "toolchain does not support explicit_bzero" - echo "#define LINUX_NO_EXPLICIT_BZERO" >> config.h - fi - if [[ ! $target_arch == "aarch64" ]]; then - echo "LDFLAGS += -Xlinker -z -Xlinker noexecstack -Xlinker --unresolved-symbols=report-all" >> Makefile.config - fi -fi - -# Export the compilation flags to Makefile.config so that we can reuse them -# in other directories, like tests -if $compile_vec128; then echo "COMPILE_VEC128 = 1" >> Makefile.config; fi -if $compile_vec256; then echo "COMPILE_VEC256 = 1" >> Makefile.config; fi -if $compile_vale; then echo "COMPILE_VALE = 1" >> Makefile.config; fi -if $compile_inline_asm; then echo "COMPILE_INLINE_ASM = 1" >> Makefile.config; fi -if $compile_intrinsics; then echo "COMPILE_INTRINSICS = 1" >> Makefile.config; fi diff --git a/dist/c89-compatible/cpuid-x86_64-darwin.S b/dist/c89-compatible/cpuid-x86_64-darwin.S deleted file mode 100644 index 1205b915e0..0000000000 --- a/dist/c89-compatible/cpuid-x86_64-darwin.S +++ /dev/null @@ -1,166 +0,0 @@ -.text -.global _check_aesni -_check_aesni: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $33554432, %rax - shr $24, %rax - and $2, %rcx - and %rcx, %rax - mov %r9, %rbx - ret - -.global _check_sha -_check_sha: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - and $536870912, %rbx - mov %rbx, %rax - mov %r9, %rbx - ret - -.global _check_adx_bmi2 -_check_adx_bmi2: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - mov %rbx, %rax - and $524288, %rax - shr $11, %rax - and $256, %rbx - and %rbx, %rax - mov %r9, %rbx - ret - -.global _check_avx -_check_avx: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $268435456, %rax - shr $27, %rax - mov %r9, %rbx - ret - -.global _check_avx2 -_check_avx2: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - and $32, %rbx - mov %rbx, %rax - mov %r9, %rbx - ret - -.global _check_movbe -_check_movbe: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $4194304, %rax - shr $21, %rax - mov %r9, %rbx - ret - -.global _check_sse -_check_sse: - mov %rbx, %r9 - mov $0, %rcx - 
mov $1, %rax - cpuid - mov %rcx, %rax - and $524288, %rax - and $512, %rcx - and $67108864, %rdx - shr $10, %rax - shr $17, %rdx - and %rdx, %rax - and %rcx, %rax - mov %r9, %rbx - ret - -.global _check_rdrand -_check_rdrand: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $1073741824, %rax - shr $29, %rax - mov %r9, %rbx - ret - -.global _check_avx512 -_check_avx512: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - mov %rbx, %rax - mov %rbx, %r10 - mov %rbx, %r11 - and $65536, %rbx - and $131072, %rax - and $1073741824, %r10 - shr $1, %rax - shr $14, %r10 - and %rbx, %rax - mov $2147483648, %rbx - and %rbx, %r11 - shr $15, %r11 - and %r10, %rax - and %r11, %rax - mov %r9, %rbx - ret - -.global _check_osxsave -_check_osxsave: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $134217728, %rax - shr $26, %rax - mov %r9, %rbx - ret - -.global _check_avx_xcr0 -_check_avx_xcr0: - mov $0, %rcx - xgetbv - mov %rax, %rcx - and $4, %rax - shr $1, %rax - and $2, %rcx - and %rcx, %rax - ret - -.global _check_avx512_xcr0 -_check_avx512_xcr0: - mov $0, %rcx - xgetbv - mov %rax, %rcx - mov %rax, %rdx - and $32, %rax - and $64, %rcx - and $128, %rdx - shr $2, %rdx - shr $1, %rcx - and %rdx, %rax - and %rcx, %rax - ret - - diff --git a/dist/c89-compatible/cpuid-x86_64-linux.S b/dist/c89-compatible/cpuid-x86_64-linux.S deleted file mode 100644 index 4508bdcd6e..0000000000 --- a/dist/c89-compatible/cpuid-x86_64-linux.S +++ /dev/null @@ -1,166 +0,0 @@ -.text -.global check_aesni -check_aesni: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $33554432, %rax - shr $24, %rax - and $2, %rcx - and %rcx, %rax - mov %r9, %rbx - ret - -.global check_sha -check_sha: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - and $536870912, %rbx - mov %rbx, %rax - mov %r9, %rbx - ret - -.global check_adx_bmi2 -check_adx_bmi2: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - mov %rbx, %rax - and $524288, %rax - shr $11, %rax - and $256, %rbx - and %rbx, %rax - mov %r9, %rbx - ret - -.global check_avx -check_avx: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $268435456, %rax - shr $27, %rax - mov %r9, %rbx - ret - -.global check_avx2 -check_avx2: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - and $32, %rbx - mov %rbx, %rax - mov %r9, %rbx - ret - -.global check_movbe -check_movbe: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $4194304, %rax - shr $21, %rax - mov %r9, %rbx - ret - -.global check_sse -check_sse: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $524288, %rax - and $512, %rcx - and $67108864, %rdx - shr $10, %rax - shr $17, %rdx - and %rdx, %rax - and %rcx, %rax - mov %r9, %rbx - ret - -.global check_rdrand -check_rdrand: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $1073741824, %rax - shr $29, %rax - mov %r9, %rbx - ret - -.global check_avx512 -check_avx512: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - mov %rbx, %rax - mov %rbx, %r10 - mov %rbx, %r11 - and $65536, %rbx - and $131072, %rax - and $1073741824, %r10 - shr $1, %rax - shr $14, %r10 - and %rbx, %rax - mov $2147483648, %rbx - and %rbx, %r11 - shr $15, %r11 - and %r10, %rax - and %r11, %rax - mov %r9, %rbx - ret - -.global check_osxsave -check_osxsave: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $134217728, %rax - shr $26, %rax - mov %r9, %rbx - ret - -.global check_avx_xcr0 
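Each of these cpuid shims (shown here in GNU-assembler syntax; the Darwin and MinGW variants differ only in symbol naming, and the MASM file below only in syntax) selects a leaf, masks the relevant feature bits, and normalizes the result into rax. As a hedged point of comparison, the same check_aesni test written against GCC/Clang's cpuid.h helpers; has_aesni_pclmul is our illustrative name, not part of the distribution.

#include <cpuid.h>   /* GCC/Clang builtin: __get_cpuid */

/* Leaf 1, ECX: bit 25 (0x2000000) is AESNI, bit 1 (0x2) is PCLMULQDQ.
   The assembly above ANDs both masks together, so it reports support
   only when both instructions are available. */
static int has_aesni_pclmul(void)
{
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (ecx & (1u << 25)) && (ecx & (1u << 1));
}

EverCrypt_AutoConfig2 consumes these predicates at runtime: as the deleted create_in_aes128_gcm shows, the Vale AES-GCM path is taken only when aesni, pclmulqdq, avx, sse, and movbe are all present.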
-check_avx_xcr0: - mov $0, %rcx - xgetbv - mov %rax, %rcx - and $4, %rax - shr $1, %rax - and $2, %rcx - and %rcx, %rax - ret - -.global check_avx512_xcr0 -check_avx512_xcr0: - mov $0, %rcx - xgetbv - mov %rax, %rcx - mov %rax, %rdx - and $32, %rax - and $64, %rcx - and $128, %rdx - shr $2, %rdx - shr $1, %rcx - and %rdx, %rax - and %rcx, %rax - ret - -.section .note.GNU-stack,"",%progbits diff --git a/dist/c89-compatible/cpuid-x86_64-mingw.S b/dist/c89-compatible/cpuid-x86_64-mingw.S deleted file mode 100644 index 47633417c4..0000000000 --- a/dist/c89-compatible/cpuid-x86_64-mingw.S +++ /dev/null @@ -1,166 +0,0 @@ -.text -.global check_aesni -check_aesni: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $33554432, %rax - shr $24, %rax - and $2, %rcx - and %rcx, %rax - mov %r9, %rbx - ret - -.global check_sha -check_sha: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - and $536870912, %rbx - mov %rbx, %rax - mov %r9, %rbx - ret - -.global check_adx_bmi2 -check_adx_bmi2: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - mov %rbx, %rax - and $524288, %rax - shr $11, %rax - and $256, %rbx - and %rbx, %rax - mov %r9, %rbx - ret - -.global check_avx -check_avx: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $268435456, %rax - shr $27, %rax - mov %r9, %rbx - ret - -.global check_avx2 -check_avx2: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - and $32, %rbx - mov %rbx, %rax - mov %r9, %rbx - ret - -.global check_movbe -check_movbe: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $4194304, %rax - shr $21, %rax - mov %r9, %rbx - ret - -.global check_sse -check_sse: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $524288, %rax - and $512, %rcx - and $67108864, %rdx - shr $10, %rax - shr $17, %rdx - and %rdx, %rax - and %rcx, %rax - mov %r9, %rbx - ret - -.global check_rdrand -check_rdrand: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $1073741824, %rax - shr $29, %rax - mov %r9, %rbx - ret - -.global check_avx512 -check_avx512: - mov %rbx, %r9 - mov $7, %rax - mov $0, %rcx - cpuid - mov %rbx, %rax - mov %rbx, %r10 - mov %rbx, %r11 - and $65536, %rbx - and $131072, %rax - and $1073741824, %r10 - shr $1, %rax - shr $14, %r10 - and %rbx, %rax - mov $2147483648, %rbx - and %rbx, %r11 - shr $15, %r11 - and %r10, %rax - and %r11, %rax - mov %r9, %rbx - ret - -.global check_osxsave -check_osxsave: - mov %rbx, %r9 - mov $0, %rcx - mov $1, %rax - cpuid - mov %rcx, %rax - and $134217728, %rax - shr $26, %rax - mov %r9, %rbx - ret - -.global check_avx_xcr0 -check_avx_xcr0: - mov $0, %rcx - xgetbv - mov %rax, %rcx - and $4, %rax - shr $1, %rax - and $2, %rcx - and %rcx, %rax - ret - -.global check_avx512_xcr0 -check_avx512_xcr0: - mov $0, %rcx - xgetbv - mov %rax, %rcx - mov %rax, %rdx - and $32, %rax - and $64, %rcx - and $128, %rdx - shr $2, %rdx - shr $1, %rcx - and %rdx, %rax - and %rcx, %rax - ret - - diff --git a/dist/c89-compatible/cpuid-x86_64-msvc.asm b/dist/c89-compatible/cpuid-x86_64-msvc.asm deleted file mode 100644 index 5659ec6c01..0000000000 --- a/dist/c89-compatible/cpuid-x86_64-msvc.asm +++ /dev/null @@ -1,166 +0,0 @@ -.code -ALIGN 16 -check_aesni proc - mov r9, rbx - mov rcx, 0 - mov rax, 1 - cpuid - mov rax, rcx - and rax, 33554432 - shr rax, 24 - and rcx, 2 - and rax, rcx - mov rbx, r9 - ret -check_aesni endp -ALIGN 16 -check_sha proc - mov r9, rbx - mov rax, 7 - mov rcx, 0 - cpuid - and rbx, 536870912 - mov rax, rbx - mov 
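The mingw and MSVC files are the same battery of checks retargeted: the mingw .S keeps AT&T syntax without underscore-prefixed symbols, while the .asm twin is Intel-syntax MASM (destination-first operands, proc/endp pairs). None of the routines take arguments, so the only ABI concern is the callee-saved rbx, handled identically everywhere; check_avx_xcr0 and check_avx512_xcr0 additionally use xgetbv to confirm the OS actually enables the corresponding register state. C callers would see them as plain externs; these declaration shapes are an assumption:

    #include <stdint.h>

    /* Assumed C-side declarations for the deleted cpuid-*.{S,asm} routines. */
    extern uint64_t check_aesni(void);     /* CPUID leaf 1 feature bits   */
    extern uint64_t check_avx2(void);      /* CPUID leaf 7 feature bits   */
    extern uint64_t check_avx_xcr0(void);  /* XGETBV: OS-enabled XMM+YMM  */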
rbx, r9 - ret -check_sha endp -ALIGN 16 -check_adx_bmi2 proc - mov r9, rbx - mov rax, 7 - mov rcx, 0 - cpuid - mov rax, rbx - and rax, 524288 - shr rax, 11 - and rbx, 256 - and rax, rbx - mov rbx, r9 - ret -check_adx_bmi2 endp -ALIGN 16 -check_avx proc - mov r9, rbx - mov rcx, 0 - mov rax, 1 - cpuid - mov rax, rcx - and rax, 268435456 - shr rax, 27 - mov rbx, r9 - ret -check_avx endp -ALIGN 16 -check_avx2 proc - mov r9, rbx - mov rax, 7 - mov rcx, 0 - cpuid - and rbx, 32 - mov rax, rbx - mov rbx, r9 - ret -check_avx2 endp -ALIGN 16 -check_movbe proc - mov r9, rbx - mov rcx, 0 - mov rax, 1 - cpuid - mov rax, rcx - and rax, 4194304 - shr rax, 21 - mov rbx, r9 - ret -check_movbe endp -ALIGN 16 -check_sse proc - mov r9, rbx - mov rcx, 0 - mov rax, 1 - cpuid - mov rax, rcx - and rax, 524288 - and rcx, 512 - and rdx, 67108864 - shr rax, 10 - shr rdx, 17 - and rax, rdx - and rax, rcx - mov rbx, r9 - ret -check_sse endp -ALIGN 16 -check_rdrand proc - mov r9, rbx - mov rcx, 0 - mov rax, 1 - cpuid - mov rax, rcx - and rax, 1073741824 - shr rax, 29 - mov rbx, r9 - ret -check_rdrand endp -ALIGN 16 -check_avx512 proc - mov r9, rbx - mov rax, 7 - mov rcx, 0 - cpuid - mov rax, rbx - mov r10, rbx - mov r11, rbx - and rbx, 65536 - and rax, 131072 - and r10, 1073741824 - shr rax, 1 - shr r10, 14 - and rax, rbx - mov rbx, 2147483648 - and r11, rbx - shr r11, 15 - and rax, r10 - and rax, r11 - mov rbx, r9 - ret -check_avx512 endp -ALIGN 16 -check_osxsave proc - mov r9, rbx - mov rcx, 0 - mov rax, 1 - cpuid - mov rax, rcx - and rax, 134217728 - shr rax, 26 - mov rbx, r9 - ret -check_osxsave endp -ALIGN 16 -check_avx_xcr0 proc - mov rcx, 0 - xgetbv - mov rcx, rax - and rax, 4 - shr rax, 1 - and rcx, 2 - and rax, rcx - ret -check_avx_xcr0 endp -ALIGN 16 -check_avx512_xcr0 proc - mov rcx, 0 - xgetbv - mov rcx, rax - mov rdx, rax - and rax, 32 - and rcx, 64 - and rdx, 128 - shr rdx, 2 - shr rcx, 1 - and rax, rdx - and rax, rcx - ret -check_avx512_xcr0 endp -end diff --git a/dist/c89-compatible/curve25519-inline.h b/dist/c89-compatible/curve25519-inline.h deleted file mode 100644 index d553167081..0000000000 --- a/dist/c89-compatible/curve25519-inline.h +++ /dev/null @@ -1,747 +0,0 @@ -#ifdef __GNUC__ -#if defined(__x86_64__) || defined(_M_X64) -#pragma once -#include - -// Computes the addition of four-element f1 with value in f2 -// and returns the carry (if any) -static inline void add_scalar (uint64_t *out, uint64_t *f1, uint64_t f2) -{ - __asm__ volatile( - // Clear registers to propagate the carry bit - " xor %%r8d, %%r8d;" - " xor %%r9d, %%r9d;" - " xor %%r10d, %%r10d;" - " xor %%r11d, %%r11d;" - " xor %%eax, %%eax;" - - // Begin addition chain - " addq 0(%2), %0;" - " movq %0, 0(%1);" - " adcxq 8(%2), %%r8;" - " movq %%r8, 8(%1);" - " adcxq 16(%2), %%r9;" - " movq %%r9, 16(%1);" - " adcxq 24(%2), %%r10;" - " movq %%r10, 24(%1);" - - // Return the carry bit in a register - " adcx %%r11, %%rax;" - : "+&r" (f2) - : "r" (out), "r" (f1) - : "%rax", "%r8", "%r9", "%r10", "%r11", "memory", "cc" - ); -} - -// Computes the field addition of two field elements -static inline void fadd (uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - __asm__ volatile( - // Compute the raw addition of f1 + f2 - " movq 0(%0), %%r8;" - " addq 0(%2), %%r8;" - " movq 8(%0), %%r9;" - " adcxq 8(%2), %%r9;" - " movq 16(%0), %%r10;" - " adcxq 16(%2), %%r10;" - " movq 24(%0), %%r11;" - " adcxq 24(%2), %%r11;" - - /////// Wrap the result back into the field ////// - - // Step 1: Compute carry*38 - " mov $0, %%rax;" - " mov $38, %0;" - " cmovc 
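Every field routine in curve25519-inline.h reduces modulo p = 2^255 - 19 with the same identity: 2^256 = 2 * 2^255 ≡ 2 * 19 = 38 (mod p), so a carry out of the 256-bit accumulator is folded back in as +38, and a second fold suffices because it can no longer produce a carry of its own. A portable C model of fadd's reduction, assuming unsigned __int128 (this models the asm; it is not the shipped code, and it is not constant-time):

    #include <stdint.h>

    /* out = representative of (f1 + f2) mod 2^255-19 in 4 x 64-bit limbs. */
    static void fadd_model(uint64_t out[4], const uint64_t f1[4], const uint64_t f2[4])
    {
        unsigned __int128 acc = 0;
        for (int i = 0; i < 4; i++) {
            acc += (unsigned __int128)f1[i] + f2[i];
            out[i] = (uint64_t)acc;
            acc >>= 64;             /* bit-256 carry survives in acc */
        }
        /* Fold carry * 2^256 back in as carry * 38, twice; the asm's second
         * fold only ever touches limb 0 ("guaranteed not to carry"). */
        for (int pass = 0; pass < 2; pass++) {
            acc = (unsigned __int128)out[0] + (uint64_t)acc * 38u;
            out[0] = (uint64_t)acc;
            acc >>= 64;
            for (int i = 1; i < 4 && acc; i++) {
                acc += out[i];
                out[i] = (uint64_t)acc;
                acc >>= 64;
            }
        }
    }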
%0, %%rax;" - - // Step 2: Add carry*38 to the original sum - " xor %%ecx, %%ecx;" - " add %%rax, %%r8;" - " adcx %%rcx, %%r9;" - " movq %%r9, 8(%1);" - " adcx %%rcx, %%r10;" - " movq %%r10, 16(%1);" - " adcx %%rcx, %%r11;" - " movq %%r11, 24(%1);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %0, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 0(%1);" - : "+&r" (f2) - : "r" (out), "r" (f1) - : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc" - ); -} - -// Computes the field substraction of two field elements -static inline void fsub (uint64_t *out, uint64_t *f1, uint64_t *f2) -{ - __asm__ volatile( - // Compute the raw substraction of f1-f2 - " movq 0(%1), %%r8;" - " subq 0(%2), %%r8;" - " movq 8(%1), %%r9;" - " sbbq 8(%2), %%r9;" - " movq 16(%1), %%r10;" - " sbbq 16(%2), %%r10;" - " movq 24(%1), %%r11;" - " sbbq 24(%2), %%r11;" - - /////// Wrap the result back into the field ////// - - // Step 1: Compute carry*38 - " mov $0, %%rax;" - " mov $38, %%rcx;" - " cmovc %%rcx, %%rax;" - - // Step 2: Substract carry*38 from the original difference - " sub %%rax, %%r8;" - " sbb $0, %%r9;" - " sbb $0, %%r10;" - " sbb $0, %%r11;" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rcx, %%rax;" - " sub %%rax, %%r8;" - - // Store the result - " movq %%r8, 0(%0);" - " movq %%r9, 8(%0);" - " movq %%r10, 16(%0);" - " movq %%r11, 24(%0);" - : - : "r" (out), "r" (f1), "r" (f2) - : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc" - ); -} - -// Computes a field multiplication: out <- f1 * f2 -// Uses the 8-element buffer tmp for intermediate results -static inline void fmul (uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tmp) -{ - __asm__ volatile( - - /////// Compute the raw multiplication: tmp <- src1 * src2 ////// - - // Compute src1[0] * src2 - " movq 0(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - - // Compute src1[1] * src2 - " movq 8(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%2), %%r8;" " movq %%r8, 8(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - - - // Compute src1[2] * src2 - " movq 16(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%2), %%r8;" " movq %%r8, 16(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - - - // Compute src1[3] * src2 - " movq 24(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 24(%2), %%r8;" " movq %%r8, 24(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 
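fmul computes the full 512-bit schoolbook product row by row: mulx does flag-free 64x64 -> 128 multiplies, which lets adcx (carry flag) and adox (overflow flag) run two independent carry chains side by side, and the 8-limb tmp buffer receives the raw product before the high half is folded in at weight 38. A C model of that final reduction step, under the same unsigned __int128 assumption as before:

    #include <stdint.h>

    /* Reduce the 512-bit product t[0..7]: result = t_lo + 38 * t_hi,
     * then keep folding the (small) carry out of bit 256 until it is gone. */
    static void fmul_reduce_model(uint64_t out[4], const uint64_t t[8])
    {
        unsigned __int128 acc = 0;
        uint64_t carry;
        for (int i = 0; i < 4; i++) {
            acc += (unsigned __int128)t[i] + (unsigned __int128)t[i + 4] * 38u;
            out[i] = (uint64_t)acc;
            acc >>= 64;
        }
        carry = (uint64_t)acc;                   /* bounded by 38 */
        while (carry) {
            acc = (unsigned __int128)out[0] + (unsigned __int128)carry * 38u;
            out[0] = (uint64_t)acc;
            uint64_t c = (uint64_t)(acc >> 64);
            for (int i = 1; i < 4 && c; i++) {
                acc = (unsigned __int128)out[i] + c;
                out[i] = (uint64_t)acc;
                c = (uint64_t)(acc >> 64);
            }
            carry = c;
        }
    }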
40(%2);" " mov $0, %%r8;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%2);" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%2);" - - // Line up pointers - " mov %2, %0;" - " mov %3, %2;" - - /////// Wrap the result back into the field ////// - - // Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - " mov $38, %%rdx;" - " mulxq 32(%0), %%r8, %%r13;" - " xor %k1, %k1;" - " adoxq 0(%0), %%r8;" - " mulxq 40(%0), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%0), %%r9;" - " mulxq 48(%0), %%r10, %%r13;" - " adcx %%rbx, %%r10;" - " adoxq 16(%0), %%r10;" - " mulxq 56(%0), %%r11, %%rax;" - " adcx %%r13, %%r11;" - " adoxq 24(%0), %%r11;" - " adcx %1, %%rax;" - " adox %1, %%rax;" - " imul %%rdx, %%rax;" - - // Step 2: Fold the carry back into dst - " add %%rax, %%r8;" - " adcx %1, %%r9;" - " movq %%r9, 8(%2);" - " adcx %1, %%r10;" - " movq %%r10, 16(%2);" - " adcx %1, %%r11;" - " movq %%r11, 24(%2);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rdx, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 0(%2);" - : "+&r" (f1), "+&r" (f2), "+&r" (tmp) - : "r" (out) - : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", "%r14", "memory", "cc" - ); -} - -// Computes two field multiplications: -// out[0] <- f1[0] * f2[0] -// out[1] <- f1[1] * f2[1] -// Uses the 16-element buffer tmp for intermediate results: -static inline void fmul2 (uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tmp) -{ - __asm__ volatile( - - /////// Compute the raw multiplication tmp[0] <- f1[0] * f2[0] ////// - - // Compute src1[0] * src2 - " movq 0(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - - // Compute src1[1] * src2 - " movq 8(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%2), %%r8;" " movq %%r8, 8(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - - - // Compute src1[2] * src2 - " movq 16(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%2), %%r8;" " movq %%r8, 16(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - - - // Compute src1[3] * src2 - " movq 24(%0), %%rdx;" - " mulxq 0(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 24(%2), %%r8;" " movq %%r8, 24(%2);" - " mulxq 8(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%2);" - " mulxq 16(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%2);" " mov $0, %%r8;" - " mulxq 24(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%2);" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%2);" - - /////// Compute 
the raw multiplication tmp[1] <- f1[1] * f2[1] ////// - - // Compute src1[0] * src2 - " movq 32(%0), %%rdx;" - " mulxq 32(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 64(%2);" - " mulxq 40(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%2);" - " mulxq 48(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 56(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - - // Compute src1[1] * src2 - " movq 40(%0), %%rdx;" - " mulxq 32(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 72(%2), %%r8;" " movq %%r8, 72(%2);" - " mulxq 40(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%2);" - " mulxq 48(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 56(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - - - // Compute src1[2] * src2 - " movq 48(%0), %%rdx;" - " mulxq 32(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 80(%2), %%r8;" " movq %%r8, 80(%2);" - " mulxq 40(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%2);" - " mulxq 48(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 56(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - - - // Compute src1[3] * src2 - " movq 56(%0), %%rdx;" - " mulxq 32(%1), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 88(%2), %%r8;" " movq %%r8, 88(%2);" - " mulxq 40(%1), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%2);" - " mulxq 48(%1), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%2);" " mov $0, %%r8;" - " mulxq 56(%1), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%2);" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%2);" - - // Line up pointers - " mov %2, %0;" - " mov %3, %2;" - - /////// Wrap the results back into the field ////// - - // Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - " mov $38, %%rdx;" - " mulxq 32(%0), %%r8, %%r13;" - " xor %k1, %k1;" - " adoxq 0(%0), %%r8;" - " mulxq 40(%0), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%0), %%r9;" - " mulxq 48(%0), %%r10, %%r13;" - " adcx %%rbx, %%r10;" - " adoxq 16(%0), %%r10;" - " mulxq 56(%0), %%r11, %%rax;" - " adcx %%r13, %%r11;" - " adoxq 24(%0), %%r11;" - " adcx %1, %%rax;" - " adox %1, %%rax;" - " imul %%rdx, %%rax;" - - // Step 2: Fold the carry back into dst - " add %%rax, %%r8;" - " adcx %1, %%r9;" - " movq %%r9, 8(%2);" - " adcx %1, %%r10;" - " movq %%r10, 16(%2);" - " adcx %1, %%r11;" - " movq %%r11, 24(%2);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rdx, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 0(%2);" - - // Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - " mov $38, %%rdx;" - " mulxq 96(%0), %%r8, %%r13;" - " xor %k1, %k1;" - " adoxq 64(%0), %%r8;" - " mulxq 104(%0), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 72(%0), %%r9;" - " mulxq 112(%0), %%r10, %%r13;" - " adcx %%rbx, %%r10;" - " adoxq 80(%0), %%r10;" - " mulxq 120(%0), %%r11, %%rax;" - " adcx %%r13, %%r11;" - " adoxq 88(%0), %%r11;" - " adcx %1, %%rax;" - " adox %1, %%rax;" - " imul %%rdx, %%rax;" - - // Step 2: Fold the carry back into dst - " add %%rax, %%r8;" - " adcx %1, %%r9;" - " movq %%r9, 40(%2);" - " adcx %1, %%r10;" - " movq %%r10, 48(%2);" - " adcx %1, %%r11;" - " 
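fmul2 is two independent field multiplications run back to back over an 8-limb pair; the Montgomery ladder in the Curve25519 code updates pairs of projective coordinates in lockstep, and batching the two products amortizes the pointer shuffling while keeping both carry chains busy. Semantically it is just the following, where fmul_model stands for a C model of fmul above (prototype assumed, for illustration):

    #include <stdint.h>

    /* Assumed C model of the fmul routine above. */
    void fmul_model(uint64_t out[4], const uint64_t f1[4],
                    const uint64_t f2[4], uint64_t tmp[8]);

    /* fmul2 semantics: out[0..3] = f1[0..3]*f2[0..3],
     *                  out[4..7] = f1[4..7]*f2[4..7], both mod 2^255-19. */
    static void fmul2_model(uint64_t out[8], const uint64_t f1[8],
                            const uint64_t f2[8], uint64_t tmp[16])
    {
        fmul_model(out,     f1,     f2,     tmp);
        fmul_model(out + 4, f1 + 4, f2 + 4, tmp + 8);
    }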
movq %%r11, 56(%2);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rdx, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 32(%2);" - : "+&r" (f1), "+&r" (f2), "+&r" (tmp) - : "r" (out) - : "%rax", "%rbx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", "%r14", "memory", "cc" - ); -} - -// Computes the field multiplication of four-element f1 with value in f2 -// Requires f2 to be smaller than 2^17 -static inline void fmul_scalar (uint64_t *out, uint64_t *f1, uint64_t f2) -{ - register uint64_t f2_r __asm__("rdx") = f2; - - __asm__ volatile( - // Compute the raw multiplication of f1*f2 - " mulxq 0(%2), %%r8, %%rcx;" // f1[0]*f2 - " mulxq 8(%2), %%r9, %%rbx;" // f1[1]*f2 - " add %%rcx, %%r9;" - " mov $0, %%rcx;" - " mulxq 16(%2), %%r10, %%r13;" // f1[2]*f2 - " adcx %%rbx, %%r10;" - " mulxq 24(%2), %%r11, %%rax;" // f1[3]*f2 - " adcx %%r13, %%r11;" - " adcx %%rcx, %%rax;" - - /////// Wrap the result back into the field ////// - - // Step 1: Compute carry*38 - " mov $38, %%rdx;" - " imul %%rdx, %%rax;" - - // Step 2: Fold the carry back into dst - " add %%rax, %%r8;" - " adcx %%rcx, %%r9;" - " movq %%r9, 8(%1);" - " adcx %%rcx, %%r10;" - " movq %%r10, 16(%1);" - " adcx %%rcx, %%r11;" - " movq %%r11, 24(%1);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rdx, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 0(%1);" - : "+&r" (f2_r) - : "r" (out), "r" (f1) - : "%rax", "%rbx", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r13", "memory", "cc" - ); -} - -// Computes p1 <- bit ? p2 : p1 in constant time -static inline void cswap2 (uint64_t bit, uint64_t *p1, uint64_t *p2) -{ - __asm__ volatile( - // Transfer bit into CF flag - " add $18446744073709551615, %0;" - - // cswap p1[0], p2[0] - " movq 0(%1), %%r8;" - " movq 0(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 0(%1);" - " movq %%r9, 0(%2);" - - // cswap p1[1], p2[1] - " movq 8(%1), %%r8;" - " movq 8(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 8(%1);" - " movq %%r9, 8(%2);" - - // cswap p1[2], p2[2] - " movq 16(%1), %%r8;" - " movq 16(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 16(%1);" - " movq %%r9, 16(%2);" - - // cswap p1[3], p2[3] - " movq 24(%1), %%r8;" - " movq 24(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 24(%1);" - " movq %%r9, 24(%2);" - - // cswap p1[4], p2[4] - " movq 32(%1), %%r8;" - " movq 32(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 32(%1);" - " movq %%r9, 32(%2);" - - // cswap p1[5], p2[5] - " movq 40(%1), %%r8;" - " movq 40(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 40(%1);" - " movq %%r9, 40(%2);" - - // cswap p1[6], p2[6] - " movq 48(%1), %%r8;" - " movq 48(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 48(%1);" - " movq %%r9, 48(%2);" - - // cswap p1[7], p2[7] - " movq 56(%1), %%r8;" - " movq 56(%2), %%r9;" - " mov %%r8, %%r10;" - " cmovc %%r9, %%r8;" - " cmovc %%r10, %%r9;" - " movq %%r8, 56(%1);" - " movq %%r9, 56(%2);" - : "+&r" (bit) - : "r" (p1), "r" (p2) - : "%r8", "%r9", "%r10", "memory", "cc" - ); -} - -// Computes the square of a field element: out <- f * f -// Uses the 8-element buffer tmp for intermediate results -static inline void fsqr 
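Two details worth calling out in the block above. fmul_scalar's precondition f2 < 2^17 is what makes its reduction safe: the top limb of the 256x17-bit product is below 2^17, so the folded value rax = carry * 38 stays far under 2^64. And cswap2 is constant-time by construction: adding 2^64 - 1 (i.e. 18446744073709551615) to the bit sets the carry flag iff the bit is 1, after which the cmovc pairs read and write both points unconditionally, with no secret-dependent branch or address. The standard mask-based C rendering of the same swap:

    #include <stdint.h>

    /* If bit == 1 swap p1[0..7] and p2[0..7]; if bit == 0 leave both intact.
     * mask is all-ones or all-zeros, so the access pattern never depends on bit. */
    static void cswap2_model(uint64_t bit, uint64_t p1[8], uint64_t p2[8])
    {
        uint64_t mask = (uint64_t)0 - bit;
        for (int i = 0; i < 8; i++) {
            uint64_t d = (p1[i] ^ p2[i]) & mask;
            p1[i] ^= d;
            p2[i] ^= d;
        }
    }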
(uint64_t *out, uint64_t *f, uint64_t *tmp) -{ - __asm__ volatile( - - /////// Compute the raw multiplication: tmp <- f * f ////// - - // Step 1: Compute all partial products - " movq 0(%0), %%rdx;" // f[0] - " mulxq 8(%0), %%r8, %%r14;" " xor %%r15d, %%r15d;" // f[1]*f[0] - " mulxq 16(%0), %%r9, %%r10;" " adcx %%r14, %%r9;" // f[2]*f[0] - " mulxq 24(%0), %%rax, %%rcx;" " adcx %%rax, %%r10;" // f[3]*f[0] - " movq 24(%0), %%rdx;" // f[3] - " mulxq 8(%0), %%r11, %%rbx;" " adcx %%rcx, %%r11;" // f[1]*f[3] - " mulxq 16(%0), %%rax, %%r13;" " adcx %%rax, %%rbx;" // f[2]*f[3] - " movq 8(%0), %%rdx;" " adcx %%r15, %%r13;" // f1 - " mulxq 16(%0), %%rax, %%rcx;" " mov $0, %%r14;" // f[2]*f[1] - - // Step 2: Compute two parallel carry chains - " xor %%r15d, %%r15d;" - " adox %%rax, %%r10;" - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" - " adcx %%r9, %%r9;" - " adox %%r15, %%rbx;" - " adcx %%r10, %%r10;" - " adox %%r15, %%r13;" - " adcx %%r11, %%r11;" - " adox %%r15, %%r14;" - " adcx %%rbx, %%rbx;" - " adcx %%r13, %%r13;" - " adcx %%r14, %%r14;" - - // Step 3: Compute intermediate squares - " movq 0(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[0]^2 - " movq %%rax, 0(%1);" - " add %%rcx, %%r8;" " movq %%r8, 8(%1);" - " movq 8(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[1]^2 - " adcx %%rax, %%r9;" " movq %%r9, 16(%1);" - " adcx %%rcx, %%r10;" " movq %%r10, 24(%1);" - " movq 16(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[2]^2 - " adcx %%rax, %%r11;" " movq %%r11, 32(%1);" - " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%1);" - " movq 24(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[3]^2 - " adcx %%rax, %%r13;" " movq %%r13, 48(%1);" - " adcx %%rcx, %%r14;" " movq %%r14, 56(%1);" - - // Line up pointers - " mov %1, %0;" - " mov %2, %1;" - - /////// Wrap the result back into the field ////// - - // Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - " mov $38, %%rdx;" - " mulxq 32(%0), %%r8, %%r13;" - " xor %%ecx, %%ecx;" - " adoxq 0(%0), %%r8;" - " mulxq 40(%0), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%0), %%r9;" - " mulxq 48(%0), %%r10, %%r13;" - " adcx %%rbx, %%r10;" - " adoxq 16(%0), %%r10;" - " mulxq 56(%0), %%r11, %%rax;" - " adcx %%r13, %%r11;" - " adoxq 24(%0), %%r11;" - " adcx %%rcx, %%rax;" - " adox %%rcx, %%rax;" - " imul %%rdx, %%rax;" - - // Step 2: Fold the carry back into dst - " add %%rax, %%r8;" - " adcx %%rcx, %%r9;" - " movq %%r9, 8(%1);" - " adcx %%rcx, %%r10;" - " movq %%r10, 16(%1);" - " adcx %%rcx, %%r11;" - " movq %%r11, 24(%1);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rdx, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 0(%1);" - : "+&r" (f), "+&r" (tmp) - : "r" (out) - : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", "%r14", "%r15", "memory", "cc" - ); -} - -// Computes two field squarings: -// out[0] <- f[0] * f[0] -// out[1] <- f[1] * f[1] -// Uses the 16-element buffer tmp for intermediate results -static inline void fsqr2 (uint64_t *out, uint64_t *f, uint64_t *tmp) -{ - __asm__ volatile( - // Step 1: Compute all partial products - " movq 0(%0), %%rdx;" // f[0] - " mulxq 8(%0), %%r8, %%r14;" " xor %%r15d, %%r15d;" // f[1]*f[0] - " mulxq 16(%0), %%r9, %%r10;" " adcx %%r14, %%r9;" // f[2]*f[0] - " mulxq 24(%0), %%rax, %%rcx;" " adcx %%rax, %%r10;" // f[3]*f[0] - " movq 24(%0), %%rdx;" // f[3] - " mulxq 8(%0), %%r11, %%rbx;" " adcx %%rcx, %%r11;" // f[1]*f[3] - " mulxq 16(%0), %%rax, %%r13;" " adcx %%rax, %%rbx;" // f[2]*f[3] - " movq 8(%0), %%rdx;" " adcx %%r15, %%r13;" // f1 
- " mulxq 16(%0), %%rax, %%rcx;" " mov $0, %%r14;" // f[2]*f[1] - - // Step 2: Compute two parallel carry chains - " xor %%r15d, %%r15d;" - " adox %%rax, %%r10;" - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" - " adcx %%r9, %%r9;" - " adox %%r15, %%rbx;" - " adcx %%r10, %%r10;" - " adox %%r15, %%r13;" - " adcx %%r11, %%r11;" - " adox %%r15, %%r14;" - " adcx %%rbx, %%rbx;" - " adcx %%r13, %%r13;" - " adcx %%r14, %%r14;" - - // Step 3: Compute intermediate squares - " movq 0(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[0]^2 - " movq %%rax, 0(%1);" - " add %%rcx, %%r8;" " movq %%r8, 8(%1);" - " movq 8(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[1]^2 - " adcx %%rax, %%r9;" " movq %%r9, 16(%1);" - " adcx %%rcx, %%r10;" " movq %%r10, 24(%1);" - " movq 16(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[2]^2 - " adcx %%rax, %%r11;" " movq %%r11, 32(%1);" - " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%1);" - " movq 24(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[3]^2 - " adcx %%rax, %%r13;" " movq %%r13, 48(%1);" - " adcx %%rcx, %%r14;" " movq %%r14, 56(%1);" - - // Step 1: Compute all partial products - " movq 32(%0), %%rdx;" // f[0] - " mulxq 40(%0), %%r8, %%r14;" " xor %%r15d, %%r15d;" // f[1]*f[0] - " mulxq 48(%0), %%r9, %%r10;" " adcx %%r14, %%r9;" // f[2]*f[0] - " mulxq 56(%0), %%rax, %%rcx;" " adcx %%rax, %%r10;" // f[3]*f[0] - " movq 56(%0), %%rdx;" // f[3] - " mulxq 40(%0), %%r11, %%rbx;" " adcx %%rcx, %%r11;" // f[1]*f[3] - " mulxq 48(%0), %%rax, %%r13;" " adcx %%rax, %%rbx;" // f[2]*f[3] - " movq 40(%0), %%rdx;" " adcx %%r15, %%r13;" // f1 - " mulxq 48(%0), %%rax, %%rcx;" " mov $0, %%r14;" // f[2]*f[1] - - // Step 2: Compute two parallel carry chains - " xor %%r15d, %%r15d;" - " adox %%rax, %%r10;" - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" - " adcx %%r9, %%r9;" - " adox %%r15, %%rbx;" - " adcx %%r10, %%r10;" - " adox %%r15, %%r13;" - " adcx %%r11, %%r11;" - " adox %%r15, %%r14;" - " adcx %%rbx, %%rbx;" - " adcx %%r13, %%r13;" - " adcx %%r14, %%r14;" - - // Step 3: Compute intermediate squares - " movq 32(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[0]^2 - " movq %%rax, 64(%1);" - " add %%rcx, %%r8;" " movq %%r8, 72(%1);" - " movq 40(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[1]^2 - " adcx %%rax, %%r9;" " movq %%r9, 80(%1);" - " adcx %%rcx, %%r10;" " movq %%r10, 88(%1);" - " movq 48(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[2]^2 - " adcx %%rax, %%r11;" " movq %%r11, 96(%1);" - " adcx %%rcx, %%rbx;" " movq %%rbx, 104(%1);" - " movq 56(%0), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" // f[3]^2 - " adcx %%rax, %%r13;" " movq %%r13, 112(%1);" - " adcx %%rcx, %%r14;" " movq %%r14, 120(%1);" - - // Line up pointers - " mov %1, %0;" - " mov %2, %1;" - - // Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - " mov $38, %%rdx;" - " mulxq 32(%0), %%r8, %%r13;" - " xor %%ecx, %%ecx;" - " adoxq 0(%0), %%r8;" - " mulxq 40(%0), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%0), %%r9;" - " mulxq 48(%0), %%r10, %%r13;" - " adcx %%rbx, %%r10;" - " adoxq 16(%0), %%r10;" - " mulxq 56(%0), %%r11, %%rax;" - " adcx %%r13, %%r11;" - " adoxq 24(%0), %%r11;" - " adcx %%rcx, %%rax;" - " adox %%rcx, %%rax;" - " imul %%rdx, %%rax;" - - // Step 2: Fold the carry back into dst - " add %%rax, %%r8;" - " adcx %%rcx, %%r9;" - " movq %%r9, 8(%1);" - " adcx %%rcx, %%r10;" - " movq %%r10, 16(%1);" - " adcx %%rcx, %%r11;" - " movq %%r11, 24(%1);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rdx, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 
0(%1);" - - // Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - " mov $38, %%rdx;" - " mulxq 96(%0), %%r8, %%r13;" - " xor %%ecx, %%ecx;" - " adoxq 64(%0), %%r8;" - " mulxq 104(%0), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 72(%0), %%r9;" - " mulxq 112(%0), %%r10, %%r13;" - " adcx %%rbx, %%r10;" - " adoxq 80(%0), %%r10;" - " mulxq 120(%0), %%r11, %%rax;" - " adcx %%r13, %%r11;" - " adoxq 88(%0), %%r11;" - " adcx %%rcx, %%rax;" - " adox %%rcx, %%rax;" - " imul %%rdx, %%rax;" - - // Step 2: Fold the carry back into dst - " add %%rax, %%r8;" - " adcx %%rcx, %%r9;" - " movq %%r9, 40(%1);" - " adcx %%rcx, %%r10;" - " movq %%r10, 48(%1);" - " adcx %%rcx, %%r11;" - " movq %%r11, 56(%1);" - - // Step 3: Fold the carry bit back in; guaranteed not to carry at this point - " mov $0, %%rax;" - " cmovc %%rdx, %%rax;" - " add %%rax, %%r8;" - " movq %%r8, 32(%1);" - : "+&r" (f), "+&r" (tmp) - : "r" (out) - : "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r13", "%r14", "%r15", "memory", "cc" - ); -} - -#endif /* defined(__x86_64__) || defined(_M_X64) */ -#endif /* __GNUC__ */ diff --git a/dist/c89-compatible/curve25519-x86_64-darwin.S b/dist/c89-compatible/curve25519-x86_64-darwin.S deleted file mode 100644 index 26147d9da9..0000000000 --- a/dist/c89-compatible/curve25519-x86_64-darwin.S +++ /dev/null @@ -1,986 +0,0 @@ -.text -.global _add_scalar_e -_add_scalar_e: - push %rdi - push %rsi - ;# Clear registers to propagate the carry bit - xor %r8d, %r8d - xor %r9d, %r9d - xor %r10d, %r10d - xor %r11d, %r11d - xor %eax, %eax - - ;# Begin addition chain - addq 0(%rsi), %rdx - movq %rdx, 0(%rdi) - adcxq 8(%rsi), %r8 - movq %r8, 8(%rdi) - adcxq 16(%rsi), %r9 - movq %r9, 16(%rdi) - adcxq 24(%rsi), %r10 - movq %r10, 24(%rdi) - - ;# Return the carry bit in a register - adcx %r11, %rax - pop %rsi - pop %rdi - ret - -.global _fadd_e -_fadd_e: - ;# Compute the raw addition of f1 + f2 - movq 0(%rdx), %r8 - addq 0(%rsi), %r8 - movq 8(%rdx), %r9 - adcxq 8(%rsi), %r9 - movq 16(%rdx), %r10 - adcxq 16(%rsi), %r10 - movq 24(%rdx), %r11 - adcxq 24(%rsi), %r11 - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $0, %rax - mov $38, %rdx - cmovc %rdx, %rax - - ;# Step 2: Add carry*38 to the original sum - xor %ecx, %ecx - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - ret - -.global _fsub_e -_fsub_e: - ;# Compute the raw substraction of f1-f2 - movq 0(%rsi), %r8 - subq 0(%rdx), %r8 - movq 8(%rsi), %r9 - sbbq 8(%rdx), %r9 - movq 16(%rsi), %r10 - sbbq 16(%rdx), %r10 - movq 24(%rsi), %r11 - sbbq 24(%rdx), %r11 - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $0, %rax - mov $38, %rcx - cmovc %rcx, %rax - - ;# Step 2: Substract carry*38 from the original difference - sub %rax, %r8 - sbb $0, %r9 - sbb $0, %r10 - sbb $0, %r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rcx, %rax - sub %rax, %r8 - - ;# Store the result - movq %r8, 0(%rdi) - movq %r9, 8(%rdi) - movq %r10, 16(%rdi) - movq %r11, 24(%rdi) - ret - -.global _fmul_scalar_e -_fmul_scalar_e: - push %rdi - push %r13 - push %rbx - ;# Compute the raw multiplication of f1*f2 - mulxq 0(%rsi), %r8, %rcx - ;# f1[0]*f2 - mulxq 8(%rsi), %r9, %rbx - ;# f1[1]*f2 - add %rcx, %r9 - mov $0, %rcx - mulxq 16(%rsi), %r10, 
%r13 - ;# f1[2]*f2 - adcx %rbx, %r10 - mulxq 24(%rsi), %r11, %rax - ;# f1[3]*f2 - adcx %r13, %r11 - adcx %rcx, %rax - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $38, %rdx - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rbx - pop %r13 - pop %rdi - ret - -.global _fmul_e -_fmul_e: - push %r13 - push %r14 - push %r15 - push %rbx - mov %rdx, %r15 - ;# Compute the raw multiplication: tmp <- src1 * src2 - ;# Compute src1[0] * src2 - movq 0(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 0(%rdi) - - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 8(%rdi) - - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 8(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 8(%rdi), %r8 - movq %r8, 8(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 16(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 16(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 16(%rdi), %r8 - movq %r8, 16(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 24(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 24(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 24(%rdi), %r8 - movq %r8, 24(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 32(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 40(%rdi) - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 48(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 56(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r15, %rdi - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rbx - pop %r15 - pop %r14 - pop %r13 - ret - -.global _fmul2_e -_fmul2_e: - push %r13 - push %r14 - push %r15 - push %rbx - mov %rdx, %r15 - ;# Compute the raw multiplication tmp[0] <- f1[0] * f2[0] - ;# Compute src1[0] * src2 - movq 0(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 0(%rdi) - - mulxq 8(%rcx), %r10, %r11 - 
adox %r9, %r10 - movq %r10, 8(%rdi) - - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 8(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 8(%rdi), %r8 - movq %r8, 8(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 16(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 16(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 16(%rdi), %r8 - movq %r8, 16(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 24(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 24(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 24(%rdi), %r8 - movq %r8, 24(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 32(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 40(%rdi) - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 48(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 56(%rdi) - - ;# Compute the raw multiplication tmp[1] <- f1[1] * f2[1] - ;# Compute src1[0] * src2 - movq 32(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 64(%rdi) - - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 72(%rdi) - - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 40(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 72(%rdi), %r8 - movq %r8, 72(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 80(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 48(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 80(%rdi), %r8 - movq %r8, 80(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 88(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 56(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 88(%rdi), %r8 - movq %r8, 88(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 96(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 104(%rdi) - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 112(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 120(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r15, %rdi - ;# Wrap the results back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx 
%rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 96(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 64(%rsi), %r8 - mulxq 104(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 72(%rsi), %r9 - mulxq 112(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 80(%rsi), %r10 - mulxq 120(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 88(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 40(%rdi) - adcx %rcx, %r10 - movq %r10, 48(%rdi) - adcx %rcx, %r11 - movq %r11, 56(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 32(%rdi) - pop %rbx - pop %r15 - pop %r14 - pop %r13 - ret - -.global _fsqr_e -_fsqr_e: - push %r15 - push %r13 - push %r14 - push %r12 - push %rbx - mov %rdx, %r12 - ;# Compute the raw multiplication: tmp <- f * f - ;# Step 1: Compute all partial products - movq 0(%rsi), %rdx - ;# f[0] - mulxq 8(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 16(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 24(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 24(%rsi), %rdx - ;# f[3] - mulxq 8(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 16(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 8(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 16(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 0(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 0(%rdi) - - add %rcx, %r8 - movq %r8, 8(%rdi) - - movq 8(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 16(%rdi) - - adcx %rcx, %r10 - movq %r10, 24(%rdi) - - movq 16(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 32(%rdi) - - adcx %rcx, %rbx - movq %rbx, 40(%rdi) - - movq 24(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 48(%rdi) - - adcx %rcx, %r14 - movq %r14, 56(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r12, %rdi - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at 
this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rbx - pop %r12 - pop %r14 - pop %r13 - pop %r15 - ret - -.global _fsqr2_e -_fsqr2_e: - push %r15 - push %r13 - push %r14 - push %r12 - push %rbx - mov %rdx, %r12 - ;# Step 1: Compute all partial products - movq 0(%rsi), %rdx - ;# f[0] - mulxq 8(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 16(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 24(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 24(%rsi), %rdx - ;# f[3] - mulxq 8(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 16(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 8(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 16(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 0(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 0(%rdi) - - add %rcx, %r8 - movq %r8, 8(%rdi) - - movq 8(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 16(%rdi) - - adcx %rcx, %r10 - movq %r10, 24(%rdi) - - movq 16(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 32(%rdi) - - adcx %rcx, %rbx - movq %rbx, 40(%rdi) - - movq 24(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 48(%rdi) - - adcx %rcx, %r14 - movq %r14, 56(%rdi) - - - ;# Step 1: Compute all partial products - movq 32(%rsi), %rdx - ;# f[0] - mulxq 40(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 48(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 56(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 56(%rsi), %rdx - ;# f[3] - mulxq 40(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 48(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 40(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 48(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 32(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 64(%rdi) - - add %rcx, %r8 - movq %r8, 72(%rdi) - - movq 40(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 80(%rdi) - - adcx %rcx, %r10 - movq %r10, 88(%rdi) - - movq 48(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 96(%rdi) - - adcx %rcx, %rbx - movq %rbx, 104(%rdi) - - movq 56(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 112(%rdi) - - adcx %rcx, %r14 - movq %r14, 120(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r12, %rdi - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx 
%rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 96(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 64(%rsi), %r8 - mulxq 104(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 72(%rsi), %r9 - mulxq 112(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 80(%rsi), %r10 - mulxq 120(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 88(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 40(%rdi) - adcx %rcx, %r10 - movq %r10, 48(%rdi) - adcx %rcx, %r11 - movq %r11, 56(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 32(%rdi) - pop %rbx - pop %r12 - pop %r14 - pop %r13 - pop %r15 - ret - -.global _cswap2_e -_cswap2_e: - ;# Transfer bit into CF flag - add $18446744073709551615, %rdi - - ;# cswap p1[0], p2[0] - movq 0(%rsi), %r8 - movq 0(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 0(%rsi) - movq %r9, 0(%rdx) - - ;# cswap p1[1], p2[1] - movq 8(%rsi), %r8 - movq 8(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 8(%rsi) - movq %r9, 8(%rdx) - - ;# cswap p1[2], p2[2] - movq 16(%rsi), %r8 - movq 16(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 16(%rsi) - movq %r9, 16(%rdx) - - ;# cswap p1[3], p2[3] - movq 24(%rsi), %r8 - movq 24(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 24(%rsi) - movq %r9, 24(%rdx) - - ;# cswap p1[4], p2[4] - movq 32(%rsi), %r8 - movq 32(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 32(%rsi) - movq %r9, 32(%rdx) - - ;# cswap p1[5], p2[5] - movq 40(%rsi), %r8 - movq 40(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 40(%rsi) - movq %r9, 40(%rdx) - - ;# cswap p1[6], p2[6] - movq 48(%rsi), %r8 - movq 48(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 48(%rsi) - movq %r9, 48(%rdx) - - ;# cswap p1[7], p2[7] - movq 56(%rsi), %r8 - movq 56(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 56(%rsi) - movq %r9, 56(%rdx) - ret - - diff --git a/dist/c89-compatible/curve25519-x86_64-linux.S b/dist/c89-compatible/curve25519-x86_64-linux.S deleted file mode 100644 index 3c51f24b57..0000000000 --- a/dist/c89-compatible/curve25519-x86_64-linux.S +++ /dev/null @@ -1,986 +0,0 @@ -.text -.global add_scalar_e -add_scalar_e: - push %rdi - push %rsi - ;# Clear registers to propagate the carry bit - xor %r8d, %r8d - xor %r9d, %r9d - xor %r10d, %r10d - xor %r11d, %r11d - xor %eax, %eax - - ;# Begin addition chain - addq 0(%rsi), %rdx - movq %rdx, 0(%rdi) - adcxq 8(%rsi), %r8 - movq %r8, 8(%rdi) - adcxq 16(%rsi), %r9 - movq %r9, 16(%rdi) - adcxq 24(%rsi), %r10 - movq %r10, 24(%rdi) - - ;# Return the carry bit in a register - adcx %r11, %rax - pop %rsi - pop %rdi - ret - -.global fadd_e -fadd_e: - ;# Compute the raw addition of f1 + f2 - movq 0(%rdx), %r8 - addq 0(%rsi), %r8 - movq 8(%rdx), %r9 - adcxq 8(%rsi), %r9 - movq 16(%rdx), %r10 - adcxq 16(%rsi), %r10 - movq 24(%rdx), %r11 - adcxq 24(%rsi), %r11 - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $0, %rax - mov $38, %rdx - cmovc %rdx, %rax - - ;# Step 
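The Linux file that follows is the Darwin code without the underscore prefixes; like cpuid-x86_64-linux.S above, the Linux objects carry a `.section .note.GNU-stack,"",%progbits` marker so GNU ld knows they need no executable stack, which the configure fragment reinforces with `-Xlinker -z -Xlinker noexecstack`. A hypothetical caller-side sketch of the constant-time selection these routines enable in the Montgomery ladder:

    #include <stdint.h>

    extern void cswap2_e(uint64_t bit, uint64_t *p1, uint64_t *p2);

    /* Hypothetical ladder fragment: the working point pair is conditionally
     * swapped on a secret scalar bit, with no data-dependent branching. */
    static void ladder_select(uint64_t secret_bit, uint64_t xz2[8], uint64_t xz3[8])
    {
        cswap2_e(secret_bit & 1u, xz2, xz3);
    }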
2: Add carry*38 to the original sum - xor %ecx, %ecx - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - ret - -.global fsub_e -fsub_e: - ;# Compute the raw substraction of f1-f2 - movq 0(%rsi), %r8 - subq 0(%rdx), %r8 - movq 8(%rsi), %r9 - sbbq 8(%rdx), %r9 - movq 16(%rsi), %r10 - sbbq 16(%rdx), %r10 - movq 24(%rsi), %r11 - sbbq 24(%rdx), %r11 - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $0, %rax - mov $38, %rcx - cmovc %rcx, %rax - - ;# Step 2: Substract carry*38 from the original difference - sub %rax, %r8 - sbb $0, %r9 - sbb $0, %r10 - sbb $0, %r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rcx, %rax - sub %rax, %r8 - - ;# Store the result - movq %r8, 0(%rdi) - movq %r9, 8(%rdi) - movq %r10, 16(%rdi) - movq %r11, 24(%rdi) - ret - -.global fmul_scalar_e -fmul_scalar_e: - push %rdi - push %r13 - push %rbx - ;# Compute the raw multiplication of f1*f2 - mulxq 0(%rsi), %r8, %rcx - ;# f1[0]*f2 - mulxq 8(%rsi), %r9, %rbx - ;# f1[1]*f2 - add %rcx, %r9 - mov $0, %rcx - mulxq 16(%rsi), %r10, %r13 - ;# f1[2]*f2 - adcx %rbx, %r10 - mulxq 24(%rsi), %r11, %rax - ;# f1[3]*f2 - adcx %r13, %r11 - adcx %rcx, %rax - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $38, %rdx - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rbx - pop %r13 - pop %rdi - ret - -.global fmul_e -fmul_e: - push %r13 - push %r14 - push %r15 - push %rbx - mov %rdx, %r15 - ;# Compute the raw multiplication: tmp <- src1 * src2 - ;# Compute src1[0] * src2 - movq 0(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 0(%rdi) - - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 8(%rdi) - - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 8(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 8(%rdi), %r8 - movq %r8, 8(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 16(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 16(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 16(%rdi), %r8 - movq %r8, 16(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 24(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 24(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 24(%rdi), %r8 - movq %r8, 24(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 32(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 40(%rdi) - mov $0, %r8 - mulxq 
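One quirk visible throughout fmul_e and friends: mulx takes one multiplicand implicitly from %rdx, which under SysV is also the third argument register, so each routine immediately stashes that argument (mov %rdx, %r15 in fmul_e, mov %rdx, %r12 in fsqr_e) before the multiply loop overwrites %rdx with successive source limbs. The intrinsic spelling of one such partial product, should one want to model it in C (requires BMI2, e.g. -mbmi2):

    #include <stdint.h>
    #include <immintrin.h>   /* _mulx_u64 */

    /* Flag-free 64x64 -> 128 multiply, the building block of the schoolbook
     * product above; leaving CF/OF untouched is what permits the interleaved
     * adcx/adox carry chains. */
    static inline uint64_t partial_product(uint64_t a, uint64_t b, uint64_t *hi)
    {
        unsigned long long h;
        uint64_t lo = (uint64_t)_mulx_u64(a, b, &h);
        *hi = (uint64_t)h;
        return lo;
    }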
24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 48(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 56(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r15, %rdi - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rbx - pop %r15 - pop %r14 - pop %r13 - ret - -.global fmul2_e -fmul2_e: - push %r13 - push %r14 - push %r15 - push %rbx - mov %rdx, %r15 - ;# Compute the raw multiplication tmp[0] <- f1[0] * f2[0] - ;# Compute src1[0] * src2 - movq 0(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 0(%rdi) - - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 8(%rdi) - - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 8(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 8(%rdi), %r8 - movq %r8, 8(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 16(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 16(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 16(%rdi), %r8 - movq %r8, 16(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 24(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 24(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 24(%rdi), %r8 - movq %r8, 24(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 32(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 40(%rdi) - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 48(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 56(%rdi) - - ;# Compute the raw multiplication tmp[1] <- f1[1] * f2[1] - ;# Compute src1[0] * src2 - movq 32(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 64(%rdi) - - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 72(%rdi) - - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 40(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 72(%rdi), %r8 - movq %r8, 72(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 80(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - 
adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 48(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 80(%rdi), %r8 - movq %r8, 80(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 88(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 56(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 88(%rdi), %r8 - movq %r8, 88(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 96(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 104(%rdi) - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 112(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 120(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r15, %rdi - ;# Wrap the results back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 96(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 64(%rsi), %r8 - mulxq 104(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 72(%rsi), %r9 - mulxq 112(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 80(%rsi), %r10 - mulxq 120(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 88(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 40(%rdi) - adcx %rcx, %r10 - movq %r10, 48(%rdi) - adcx %rcx, %r11 - movq %r11, 56(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 32(%rdi) - pop %rbx - pop %r15 - pop %r14 - pop %r13 - ret - -.global fsqr_e -fsqr_e: - push %r15 - push %r13 - push %r14 - push %r12 - push %rbx - mov %rdx, %r12 - ;# Compute the raw multiplication: tmp <- f * f - ;# Step 1: Compute all partial products - movq 0(%rsi), %rdx - ;# f[0] - mulxq 8(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 16(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 24(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 24(%rsi), %rdx - ;# f[3] - mulxq 8(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 16(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 8(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 16(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# 
Step 3: Compute intermediate squares - movq 0(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 0(%rdi) - - add %rcx, %r8 - movq %r8, 8(%rdi) - - movq 8(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 16(%rdi) - - adcx %rcx, %r10 - movq %r10, 24(%rdi) - - movq 16(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 32(%rdi) - - adcx %rcx, %rbx - movq %rbx, 40(%rdi) - - movq 24(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 48(%rdi) - - adcx %rcx, %r14 - movq %r14, 56(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r12, %rdi - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rbx - pop %r12 - pop %r14 - pop %r13 - pop %r15 - ret - -.global fsqr2_e -fsqr2_e: - push %r15 - push %r13 - push %r14 - push %r12 - push %rbx - mov %rdx, %r12 - ;# Step 1: Compute all partial products - movq 0(%rsi), %rdx - ;# f[0] - mulxq 8(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 16(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 24(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 24(%rsi), %rdx - ;# f[3] - mulxq 8(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 16(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 8(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 16(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 0(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 0(%rdi) - - add %rcx, %r8 - movq %r8, 8(%rdi) - - movq 8(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 16(%rdi) - - adcx %rcx, %r10 - movq %r10, 24(%rdi) - - movq 16(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 32(%rdi) - - adcx %rcx, %rbx - movq %rbx, 40(%rdi) - - movq 24(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 48(%rdi) - - adcx %rcx, %r14 - movq %r14, 56(%rdi) - - - ;# Step 1: Compute all partial products - movq 32(%rsi), %rdx - ;# f[0] - mulxq 40(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 48(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 56(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 56(%rsi), %rdx - ;# f[3] - mulxq 40(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 48(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 40(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 48(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox 
%rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 32(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 64(%rdi) - - add %rcx, %r8 - movq %r8, 72(%rdi) - - movq 40(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 80(%rdi) - - adcx %rcx, %r10 - movq %r10, 88(%rdi) - - movq 48(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 96(%rdi) - - adcx %rcx, %rbx - movq %rbx, 104(%rdi) - - movq 56(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 112(%rdi) - - adcx %rcx, %r14 - movq %r14, 120(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r12, %rdi - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 96(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 64(%rsi), %r8 - mulxq 104(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 72(%rsi), %r9 - mulxq 112(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 80(%rsi), %r10 - mulxq 120(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 88(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 40(%rdi) - adcx %rcx, %r10 - movq %r10, 48(%rdi) - adcx %rcx, %r11 - movq %r11, 56(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 32(%rdi) - pop %rbx - pop %r12 - pop %r14 - pop %r13 - pop %r15 - ret - -.global cswap2_e -cswap2_e: - ;# Transfer bit into CF flag - add $18446744073709551615, %rdi - - ;# cswap p1[0], p2[0] - movq 0(%rsi), %r8 - movq 0(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 0(%rsi) - movq %r9, 0(%rdx) - - ;# cswap p1[1], p2[1] - movq 8(%rsi), %r8 - movq 8(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 8(%rsi) - movq %r9, 8(%rdx) - - ;# cswap p1[2], p2[2] - movq 16(%rsi), %r8 - movq 16(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 16(%rsi) - movq %r9, 16(%rdx) - - ;# cswap p1[3], p2[3] - movq 24(%rsi), %r8 - movq 24(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 24(%rsi) - movq %r9, 24(%rdx) - - ;# cswap p1[4], p2[4] - movq 32(%rsi), %r8 - movq 32(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 32(%rsi) - movq %r9, 32(%rdx) - - ;# cswap p1[5], p2[5] - movq 40(%rsi), %r8 - movq 40(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 40(%rsi) - movq %r9, 40(%rdx) - - ;# cswap p1[6], p2[6] - movq 48(%rsi), %r8 - movq 48(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 
- movq %r8, 48(%rsi) - movq %r9, 48(%rdx) - - ;# cswap p1[7], p2[7] - movq 56(%rsi), %r8 - movq 56(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 56(%rsi) - movq %r9, 56(%rdx) - ret - -.section .note.GNU-stack,"",%progbits diff --git a/dist/c89-compatible/curve25519-x86_64-mingw.S b/dist/c89-compatible/curve25519-x86_64-mingw.S deleted file mode 100644 index 93dd798e7d..0000000000 --- a/dist/c89-compatible/curve25519-x86_64-mingw.S +++ /dev/null @@ -1,1041 +0,0 @@ -.text -.global add_scalar_e -add_scalar_e: - push %rdi - push %rsi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - ;# Clear registers to propagate the carry bit - xor %r8d, %r8d - xor %r9d, %r9d - xor %r10d, %r10d - xor %r11d, %r11d - xor %eax, %eax - - ;# Begin addition chain - addq 0(%rsi), %rdx - movq %rdx, 0(%rdi) - adcxq 8(%rsi), %r8 - movq %r8, 8(%rdi) - adcxq 16(%rsi), %r9 - movq %r9, 16(%rdi) - adcxq 24(%rsi), %r10 - movq %r10, 24(%rdi) - - ;# Return the carry bit in a register - adcx %r11, %rax - pop %rsi - pop %rdi - ret - -.global fadd_e -fadd_e: - push %rdi - push %rsi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - ;# Compute the raw addition of f1 + f2 - movq 0(%rdx), %r8 - addq 0(%rsi), %r8 - movq 8(%rdx), %r9 - adcxq 8(%rsi), %r9 - movq 16(%rdx), %r10 - adcxq 16(%rsi), %r10 - movq 24(%rdx), %r11 - adcxq 24(%rsi), %r11 - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $0, %rax - mov $38, %rdx - cmovc %rdx, %rax - - ;# Step 2: Add carry*38 to the original sum - xor %ecx, %ecx - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rsi - pop %rdi - ret - -.global fsub_e -fsub_e: - push %rdi - push %rsi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - ;# Compute the raw subtraction of f1-f2 - movq 0(%rsi), %r8 - subq 0(%rdx), %r8 - movq 8(%rsi), %r9 - sbbq 8(%rdx), %r9 - movq 16(%rsi), %r10 - sbbq 16(%rdx), %r10 - movq 24(%rsi), %r11 - sbbq 24(%rdx), %r11 - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $0, %rax - mov $38, %rcx - cmovc %rcx, %rax - - ;# Step 2: Subtract carry*38 from the original difference - sub %rax, %r8 - sbb $0, %r9 - sbb $0, %r10 - sbb $0, %r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rcx, %rax - sub %rax, %r8 - - ;# Store the result - movq %r8, 0(%rdi) - movq %r9, 8(%rdi) - movq %r10, 16(%rdi) - movq %r11, 24(%rdi) - pop %rsi - pop %rdi - ret - -.global fmul_scalar_e -fmul_scalar_e: - push %rdi - push %r13 - push %rbx - push %rsi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - ;# Compute the raw multiplication of f1*f2 - mulxq 0(%rsi), %r8, %rcx - ;# f1[0]*f2 - mulxq 8(%rsi), %r9, %rbx - ;# f1[1]*f2 - add %rcx, %r9 - mov $0, %rcx - mulxq 16(%rsi), %r10, %r13 - ;# f1[2]*f2 - adcx %rbx, %r10 - mulxq 24(%rsi), %r11, %rax - ;# f1[3]*f2 - adcx %r13, %r11 - adcx %rcx, %rax - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov $38, %rdx - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rsi -
pop %rbx - pop %r13 - pop %rdi - ret - -.global fmul_e -fmul_e: - push %r13 - push %r14 - push %r15 - push %rbx - push %rsi - push %rdi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %r15 - mov %r9, %rcx - ;# Compute the raw multiplication: tmp <- src1 * src2 - ;# Compute src1[0] * src2 - movq 0(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 0(%rdi) - - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 8(%rdi) - - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 8(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 8(%rdi), %r8 - movq %r8, 8(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 16(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 16(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 16(%rdi), %r8 - movq %r8, 16(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 24(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 24(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 24(%rdi), %r8 - movq %r8, 24(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 32(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 40(%rdi) - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 48(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 56(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r15, %rdi - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rdi - pop %rsi - pop %rbx - pop %r15 - pop %r14 - pop %r13 - ret - -.global fmul2_e -fmul2_e: - push %r13 - push %r14 - push %r15 - push %rbx - push %rsi - push %rdi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %r15 - mov %r9, %rcx - ;# Compute the raw multiplication tmp[0] <- f1[0] * f2[0] - ;# Compute src1[0] * src2 - movq 0(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 0(%rdi) - - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 8(%rdi) - - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 8(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 8(%rdi), %r8 - movq %r8, 8(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 
16(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 16(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 16(%rdi), %r8 - movq %r8, 16(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 24(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 24(%rsi), %rdx - mulxq 0(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 24(%rdi), %r8 - movq %r8, 24(%rdi) - mulxq 8(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 32(%rdi) - mulxq 16(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 40(%rdi) - mov $0, %r8 - mulxq 24(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 48(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 56(%rdi) - - ;# Compute the raw multiplication tmp[1] <- f1[1] * f2[1] - ;# Compute src1[0] * src2 - movq 32(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - movq %r8, 64(%rdi) - - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - movq %r10, 72(%rdi) - - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - mov $0, %rax - adox %rdx, %rax - - - ;# Compute src1[1] * src2 - movq 40(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 72(%rdi), %r8 - movq %r8, 72(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 80(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[2] * src2 - movq 48(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 80(%rdi), %r8 - movq %r8, 80(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 88(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - - - ;# Compute src1[3] * src2 - movq 56(%rsi), %rdx - mulxq 32(%rcx), %r8, %r9 - xor %r10d, %r10d - adcxq 88(%rdi), %r8 - movq %r8, 88(%rdi) - mulxq 40(%rcx), %r10, %r11 - adox %r9, %r10 - adcx %rbx, %r10 - movq %r10, 96(%rdi) - mulxq 48(%rcx), %rbx, %r13 - adox %r11, %rbx - adcx %r14, %rbx - movq %rbx, 104(%rdi) - mov $0, %r8 - mulxq 56(%rcx), %r14, %rdx - adox %r13, %r14 - adcx %rax, %r14 - movq %r14, 112(%rdi) - mov $0, %rax - adox %rdx, %rax - adcx %r8, %rax - movq %rax, 120(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r15, %rdi - ;# Wrap the results back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; 
guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 96(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 64(%rsi), %r8 - mulxq 104(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 72(%rsi), %r9 - mulxq 112(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 80(%rsi), %r10 - mulxq 120(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 88(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 40(%rdi) - adcx %rcx, %r10 - movq %r10, 48(%rdi) - adcx %rcx, %r11 - movq %r11, 56(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 32(%rdi) - pop %rdi - pop %rsi - pop %rbx - pop %r15 - pop %r14 - pop %r13 - ret - -.global fsqr_e -fsqr_e: - push %r15 - push %r13 - push %r14 - push %r12 - push %rbx - push %rsi - push %rdi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %r12 - ;# Compute the raw multiplication: tmp <- f * f - ;# Step 1: Compute all partial products - movq 0(%rsi), %rdx - ;# f[0] - mulxq 8(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 16(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 24(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 24(%rsi), %rdx - ;# f[3] - mulxq 8(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 16(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 8(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 16(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 0(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 0(%rdi) - - add %rcx, %r8 - movq %r8, 8(%rdi) - - movq 8(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 16(%rdi) - - adcx %rcx, %r10 - movq %r10, 24(%rdi) - - movq 16(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 32(%rdi) - - adcx %rcx, %rbx - movq %rbx, 40(%rdi) - - movq 24(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 48(%rdi) - - adcx %rcx, %r14 - movq %r14, 56(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r12, %rdi - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 0(%rdi) - pop %rdi - pop %rsi - pop %rbx - pop %r12 - pop %r14 - pop %r13 - pop %r15 - ret - -.global fsqr2_e -fsqr2_e: - push %r15 - push %r13 - push %r14 - push %r12 - push %rbx - push %rsi - push %rdi - mov %rcx, %rdi - mov 
%rdx, %rsi - mov %r8, %r12 - ;# Step 1: Compute all partial products - movq 0(%rsi), %rdx - ;# f[0] - mulxq 8(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 16(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 24(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 24(%rsi), %rdx - ;# f[3] - mulxq 8(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 16(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 8(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 16(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 0(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 0(%rdi) - - add %rcx, %r8 - movq %r8, 8(%rdi) - - movq 8(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 16(%rdi) - - adcx %rcx, %r10 - movq %r10, 24(%rdi) - - movq 16(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 32(%rdi) - - adcx %rcx, %rbx - movq %rbx, 40(%rdi) - - movq 24(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 48(%rdi) - - adcx %rcx, %r14 - movq %r14, 56(%rdi) - - - ;# Step 1: Compute all partial products - movq 32(%rsi), %rdx - ;# f[0] - mulxq 40(%rsi), %r8, %r14 - xor %r15d, %r15d - ;# f[1]*f[0] - mulxq 48(%rsi), %r9, %r10 - adcx %r14, %r9 - ;# f[2]*f[0] - mulxq 56(%rsi), %rax, %rcx - adcx %rax, %r10 - ;# f[3]*f[0] - movq 56(%rsi), %rdx - ;# f[3] - mulxq 40(%rsi), %r11, %rbx - adcx %rcx, %r11 - ;# f[1]*f[3] - mulxq 48(%rsi), %rax, %r13 - adcx %rax, %rbx - ;# f[2]*f[3] - movq 40(%rsi), %rdx - adcx %r15, %r13 - ;# f1 - mulxq 48(%rsi), %rax, %rcx - mov $0, %r14 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor %r15d, %r15d - adox %rax, %r10 - adcx %r8, %r8 - adox %rcx, %r11 - adcx %r9, %r9 - adox %r15, %rbx - adcx %r10, %r10 - adox %r15, %r13 - adcx %r11, %r11 - adox %r15, %r14 - adcx %rbx, %rbx - adcx %r13, %r13 - adcx %r14, %r14 - - ;# Step 3: Compute intermediate squares - movq 32(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[0]^2 - movq %rax, 64(%rdi) - - add %rcx, %r8 - movq %r8, 72(%rdi) - - movq 40(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[1]^2 - adcx %rax, %r9 - movq %r9, 80(%rdi) - - adcx %rcx, %r10 - movq %r10, 88(%rdi) - - movq 48(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[2]^2 - adcx %rax, %r11 - movq %r11, 96(%rdi) - - adcx %rcx, %rbx - movq %rbx, 104(%rdi) - - movq 56(%rsi), %rdx - mulx %rdx, %rax, %rcx - ;# f[3]^2 - adcx %rax, %r13 - movq %r13, 112(%rdi) - - adcx %rcx, %r14 - movq %r14, 120(%rdi) - - - ;# Line up pointers - mov %rdi, %rsi - mov %r12, %rdi - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 32(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 0(%rsi), %r8 - mulxq 40(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 8(%rsi), %r9 - mulxq 48(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 16(%rsi), %r10 - mulxq 56(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 24(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 8(%rdi) - adcx %rcx, %r10 - movq %r10, 16(%rdi) - adcx %rcx, %r11 - movq %r11, 24(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax 
- add %rax, %r8 - movq %r8, 0(%rdi) - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov $38, %rdx - mulxq 96(%rsi), %r8, %r13 - xor %ecx, %ecx - adoxq 64(%rsi), %r8 - mulxq 104(%rsi), %r9, %rbx - adcx %r13, %r9 - adoxq 72(%rsi), %r9 - mulxq 112(%rsi), %r10, %r13 - adcx %rbx, %r10 - adoxq 80(%rsi), %r10 - mulxq 120(%rsi), %r11, %rax - adcx %r13, %r11 - adoxq 88(%rsi), %r11 - adcx %rcx, %rax - adox %rcx, %rax - imul %rdx, %rax - - ;# Step 2: Fold the carry back into dst - add %rax, %r8 - adcx %rcx, %r9 - movq %r9, 40(%rdi) - adcx %rcx, %r10 - movq %r10, 48(%rdi) - adcx %rcx, %r11 - movq %r11, 56(%rdi) - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov $0, %rax - cmovc %rdx, %rax - add %rax, %r8 - movq %r8, 32(%rdi) - pop %rdi - pop %rsi - pop %rbx - pop %r12 - pop %r14 - pop %r13 - pop %r15 - ret - -.global cswap2_e -cswap2_e: - push %rdi - push %rsi - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - ;# Transfer bit into CF flag - add $18446744073709551615, %rdi - - ;# cswap p1[0], p2[0] - movq 0(%rsi), %r8 - movq 0(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 0(%rsi) - movq %r9, 0(%rdx) - - ;# cswap p1[1], p2[1] - movq 8(%rsi), %r8 - movq 8(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 8(%rsi) - movq %r9, 8(%rdx) - - ;# cswap p1[2], p2[2] - movq 16(%rsi), %r8 - movq 16(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 16(%rsi) - movq %r9, 16(%rdx) - - ;# cswap p1[3], p2[3] - movq 24(%rsi), %r8 - movq 24(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 24(%rsi) - movq %r9, 24(%rdx) - - ;# cswap p1[4], p2[4] - movq 32(%rsi), %r8 - movq 32(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 32(%rsi) - movq %r9, 32(%rdx) - - ;# cswap p1[5], p2[5] - movq 40(%rsi), %r8 - movq 40(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 40(%rsi) - movq %r9, 40(%rdx) - - ;# cswap p1[6], p2[6] - movq 48(%rsi), %r8 - movq 48(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 48(%rsi) - movq %r9, 48(%rdx) - - ;# cswap p1[7], p2[7] - movq 56(%rsi), %r8 - movq 56(%rdx), %r9 - mov %r8, %r10 - cmovc %r9, %r8 - cmovc %r10, %r9 - movq %r8, 56(%rsi) - movq %r9, 56(%rdx) - pop %rsi - pop %rdi - ret - - diff --git a/dist/c89-compatible/curve25519-x86_64-msvc.asm b/dist/c89-compatible/curve25519-x86_64-msvc.asm deleted file mode 100644 index 202da85a21..0000000000 --- a/dist/c89-compatible/curve25519-x86_64-msvc.asm +++ /dev/null @@ -1,1041 +0,0 @@ -.code -ALIGN 16 -add_scalar_e proc - push rdi - push rsi - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - ;# Clear registers to propagate the carry bit - xor r8d, r8d - xor r9d, r9d - xor r10d, r10d - xor r11d, r11d - xor eax, eax - - ;# Begin addition chain - add rdx, qword ptr [rsi + 0] - mov qword ptr [rdi + 0], rdx - adcx r8, qword ptr [rsi + 8] - mov qword ptr [rdi + 8], r8 - adcx r9, qword ptr [rsi + 16] - mov qword ptr [rdi + 16], r9 - adcx r10, qword ptr [rsi + 24] - mov qword ptr [rdi + 24], r10 - - ;# Return the carry bit in a register - adcx rax, r11 - pop rsi - pop rdi - ret -add_scalar_e endp -ALIGN 16 -fadd_e proc - push rdi - push rsi - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - ;# Compute the raw addition of f1 + f2 - mov r8, qword ptr [rdx + 0] - add r8, qword ptr [rsi + 0] - mov r9, qword ptr [rdx + 8] - adcx r9, qword ptr [rsi + 8] - mov r10, qword ptr [rdx + 16] - adcx r10, qword ptr [rsi + 16] - mov r11, qword ptr [rdx + 24] - adcx 
r11, qword ptr [rsi + 24] - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov rax, 0 - mov rdx, 38 - cmovc rax, rdx - - ;# Step 2: Add carry*38 to the original sum - xor ecx, ecx - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 8], r9 - adcx r10, rcx - mov qword ptr [rdi + 16], r10 - adcx r11, rcx - mov qword ptr [rdi + 24], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 0], r8 - pop rsi - pop rdi - ret -fadd_e endp -ALIGN 16 -fsub_e proc - push rdi - push rsi - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - ;# Compute the raw subtraction of f1-f2 - mov r8, qword ptr [rsi + 0] - sub r8, qword ptr [rdx + 0] - mov r9, qword ptr [rsi + 8] - sbb r9, qword ptr [rdx + 8] - mov r10, qword ptr [rsi + 16] - sbb r10, qword ptr [rdx + 16] - mov r11, qword ptr [rsi + 24] - sbb r11, qword ptr [rdx + 24] - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov rax, 0 - mov rcx, 38 - cmovc rax, rcx - - ;# Step 2: Subtract carry*38 from the original difference - sub r8, rax - sbb r9, 0 - sbb r10, 0 - sbb r11, 0 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rcx - sub r8, rax - - ;# Store the result - mov qword ptr [rdi + 0], r8 - mov qword ptr [rdi + 8], r9 - mov qword ptr [rdi + 16], r10 - mov qword ptr [rdi + 24], r11 - pop rsi - pop rdi - ret -fsub_e endp -ALIGN 16 -fmul_scalar_e proc - push rdi - push r13 - push rbx - push rsi - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - ;# Compute the raw multiplication of f1*f2 - mulx rcx, r8, qword ptr [rsi + 0] - ;# f1[0]*f2 - mulx rbx, r9, qword ptr [rsi + 8] - ;# f1[1]*f2 - add r9, rcx - mov rcx, 0 - mulx r13, r10, qword ptr [rsi + 16] - ;# f1[2]*f2 - adcx r10, rbx - mulx rax, r11, qword ptr [rsi + 24] - ;# f1[3]*f2 - adcx r11, r13 - adcx rax, rcx - ;# Wrap the result back into the field - ;# Step 1: Compute carry*38 - mov rdx, 38 - imul rax, rdx - - ;# Step 2: Fold the carry back into dst - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 8], r9 - adcx r10, rcx - mov qword ptr [rdi + 16], r10 - adcx r11, rcx - mov qword ptr [rdi + 24], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 0], r8 - pop rsi - pop rbx - pop r13 - pop rdi - ret -fmul_scalar_e endp -ALIGN 16 -fmul_e proc - push r13 - push r14 - push r15 - push rbx - push rsi - push rdi - mov rdi, rcx - mov rsi, rdx - mov r15, r8 - mov rcx, r9 - ;# Compute the raw multiplication: tmp <- src1 * src2 - ;# Compute src1[0] * src2 - mov rdx, qword ptr [rsi + 0] - mulx r9, r8, qword ptr [rcx + 0] - xor r10d, r10d - mov qword ptr [rdi + 0], r8 - - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - mov qword ptr [rdi + 8], r10 - - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - mov rax, 0 - adox rax, rdx - - - ;# Compute src1[1] * src2 - mov rdx, qword ptr [rsi + 8] - mulx r9, r8, qword ptr [rcx + 0] - xor r10d, r10d - adcx r8, qword ptr [rdi + 8] - mov qword ptr [rdi + 8], r8 - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 16], r10 - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - adcx rbx, r14 - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - adcx r14, rax - mov rax, 0 - adox rax, rdx - adcx rax, r8 - - - ;# Compute src1[2] * src2 - mov rdx, qword ptr [rsi + 16] - mulx r9, 
r8, qword ptr [rcx + 0] - xor r10d, r10d - adcx r8, qword ptr [rdi + 16] - mov qword ptr [rdi + 16], r8 - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 24], r10 - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - adcx rbx, r14 - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - adcx r14, rax - mov rax, 0 - adox rax, rdx - adcx rax, r8 - - - ;# Compute src1[3] * src2 - mov rdx, qword ptr [rsi + 24] - mulx r9, r8, qword ptr [rcx + 0] - xor r10d, r10d - adcx r8, qword ptr [rdi + 24] - mov qword ptr [rdi + 24], r8 - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 32], r10 - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - adcx rbx, r14 - mov qword ptr [rdi + 40], rbx - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - adcx r14, rax - mov qword ptr [rdi + 48], r14 - mov rax, 0 - adox rax, rdx - adcx rax, r8 - mov qword ptr [rdi + 56], rax - - - ;# Line up pointers - mov rsi, rdi - mov rdi, r15 - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov rdx, 38 - mulx r13, r8, qword ptr [rsi + 32] - xor ecx, ecx - adox r8, qword ptr [rsi + 0] - mulx rbx, r9, qword ptr [rsi + 40] - adcx r9, r13 - adox r9, qword ptr [rsi + 8] - mulx r13, r10, qword ptr [rsi + 48] - adcx r10, rbx - adox r10, qword ptr [rsi + 16] - mulx rax, r11, qword ptr [rsi + 56] - adcx r11, r13 - adox r11, qword ptr [rsi + 24] - adcx rax, rcx - adox rax, rcx - imul rax, rdx - - ;# Step 2: Fold the carry back into dst - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 8], r9 - adcx r10, rcx - mov qword ptr [rdi + 16], r10 - adcx r11, rcx - mov qword ptr [rdi + 24], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 0], r8 - pop rdi - pop rsi - pop rbx - pop r15 - pop r14 - pop r13 - ret -fmul_e endp -ALIGN 16 -fmul2_e proc - push r13 - push r14 - push r15 - push rbx - push rsi - push rdi - mov rdi, rcx - mov rsi, rdx - mov r15, r8 - mov rcx, r9 - ;# Compute the raw multiplication tmp[0] <- f1[0] * f2[0] - ;# Compute src1[0] * src2 - mov rdx, qword ptr [rsi + 0] - mulx r9, r8, qword ptr [rcx + 0] - xor r10d, r10d - mov qword ptr [rdi + 0], r8 - - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - mov qword ptr [rdi + 8], r10 - - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - mov rax, 0 - adox rax, rdx - - - ;# Compute src1[1] * src2 - mov rdx, qword ptr [rsi + 8] - mulx r9, r8, qword ptr [rcx + 0] - xor r10d, r10d - adcx r8, qword ptr [rdi + 8] - mov qword ptr [rdi + 8], r8 - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 16], r10 - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - adcx rbx, r14 - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - adcx r14, rax - mov rax, 0 - adox rax, rdx - adcx rax, r8 - - - ;# Compute src1[2] * src2 - mov rdx, qword ptr [rsi + 16] - mulx r9, r8, qword ptr [rcx + 0] - xor r10d, r10d - adcx r8, qword ptr [rdi + 16] - mov qword ptr [rdi + 16], r8 - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 24], r10 - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - adcx rbx, r14 - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - adcx r14, rax - mov rax, 0 - adox rax, rdx - adcx rax, r8 - - - ;# Compute src1[3] * src2 - mov rdx, qword ptr [rsi + 24] - mulx r9, 
r8, qword ptr [rcx + 0] - xor r10d, r10d - adcx r8, qword ptr [rdi + 24] - mov qword ptr [rdi + 24], r8 - mulx r11, r10, qword ptr [rcx + 8] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 32], r10 - mulx r13, rbx, qword ptr [rcx + 16] - adox rbx, r11 - adcx rbx, r14 - mov qword ptr [rdi + 40], rbx - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 24] - adox r14, r13 - adcx r14, rax - mov qword ptr [rdi + 48], r14 - mov rax, 0 - adox rax, rdx - adcx rax, r8 - mov qword ptr [rdi + 56], rax - - ;# Compute the raw multiplication tmp[1] <- f1[1] * f2[1] - ;# Compute src1[0] * src2 - mov rdx, qword ptr [rsi + 32] - mulx r9, r8, qword ptr [rcx + 32] - xor r10d, r10d - mov qword ptr [rdi + 64], r8 - - mulx r11, r10, qword ptr [rcx + 40] - adox r10, r9 - mov qword ptr [rdi + 72], r10 - - mulx r13, rbx, qword ptr [rcx + 48] - adox rbx, r11 - mulx rdx, r14, qword ptr [rcx + 56] - adox r14, r13 - mov rax, 0 - adox rax, rdx - - - ;# Compute src1[1] * src2 - mov rdx, qword ptr [rsi + 40] - mulx r9, r8, qword ptr [rcx + 32] - xor r10d, r10d - adcx r8, qword ptr [rdi + 72] - mov qword ptr [rdi + 72], r8 - mulx r11, r10, qword ptr [rcx + 40] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 80], r10 - mulx r13, rbx, qword ptr [rcx + 48] - adox rbx, r11 - adcx rbx, r14 - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 56] - adox r14, r13 - adcx r14, rax - mov rax, 0 - adox rax, rdx - adcx rax, r8 - - - ;# Compute src1[2] * src2 - mov rdx, qword ptr [rsi + 48] - mulx r9, r8, qword ptr [rcx + 32] - xor r10d, r10d - adcx r8, qword ptr [rdi + 80] - mov qword ptr [rdi + 80], r8 - mulx r11, r10, qword ptr [rcx + 40] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 88], r10 - mulx r13, rbx, qword ptr [rcx + 48] - adox rbx, r11 - adcx rbx, r14 - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 56] - adox r14, r13 - adcx r14, rax - mov rax, 0 - adox rax, rdx - adcx rax, r8 - - - ;# Compute src1[3] * src2 - mov rdx, qword ptr [rsi + 56] - mulx r9, r8, qword ptr [rcx + 32] - xor r10d, r10d - adcx r8, qword ptr [rdi + 88] - mov qword ptr [rdi + 88], r8 - mulx r11, r10, qword ptr [rcx + 40] - adox r10, r9 - adcx r10, rbx - mov qword ptr [rdi + 96], r10 - mulx r13, rbx, qword ptr [rcx + 48] - adox rbx, r11 - adcx rbx, r14 - mov qword ptr [rdi + 104], rbx - mov r8, 0 - mulx rdx, r14, qword ptr [rcx + 56] - adox r14, r13 - adcx r14, rax - mov qword ptr [rdi + 112], r14 - mov rax, 0 - adox rax, rdx - adcx rax, r8 - mov qword ptr [rdi + 120], rax - - - ;# Line up pointers - mov rsi, rdi - mov rdi, r15 - ;# Wrap the results back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov rdx, 38 - mulx r13, r8, qword ptr [rsi + 32] - xor ecx, ecx - adox r8, qword ptr [rsi + 0] - mulx rbx, r9, qword ptr [rsi + 40] - adcx r9, r13 - adox r9, qword ptr [rsi + 8] - mulx r13, r10, qword ptr [rsi + 48] - adcx r10, rbx - adox r10, qword ptr [rsi + 16] - mulx rax, r11, qword ptr [rsi + 56] - adcx r11, r13 - adox r11, qword ptr [rsi + 24] - adcx rax, rcx - adox rax, rcx - imul rax, rdx - - ;# Step 2: Fold the carry back into dst - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 8], r9 - adcx r10, rcx - mov qword ptr [rdi + 16], r10 - adcx r11, rcx - mov qword ptr [rdi + 24], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 0], r8 - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov rdx, 38 - mulx r13, r8, qword ptr [rsi + 96] - xor ecx, ecx - adox r8, qword ptr [rsi + 64] - mulx rbx, r9, qword ptr 
[rsi + 104] - adcx r9, r13 - adox r9, qword ptr [rsi + 72] - mulx r13, r10, qword ptr [rsi + 112] - adcx r10, rbx - adox r10, qword ptr [rsi + 80] - mulx rax, r11, qword ptr [rsi + 120] - adcx r11, r13 - adox r11, qword ptr [rsi + 88] - adcx rax, rcx - adox rax, rcx - imul rax, rdx - - ;# Step 2: Fold the carry back into dst - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 40], r9 - adcx r10, rcx - mov qword ptr [rdi + 48], r10 - adcx r11, rcx - mov qword ptr [rdi + 56], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 32], r8 - pop rdi - pop rsi - pop rbx - pop r15 - pop r14 - pop r13 - ret -fmul2_e endp -ALIGN 16 -fsqr_e proc - push r15 - push r13 - push r14 - push r12 - push rbx - push rsi - push rdi - mov rdi, rcx - mov rsi, rdx - mov r12, r8 - ;# Compute the raw multiplication: tmp <- f * f - ;# Step 1: Compute all partial products - mov rdx, qword ptr [rsi + 0] - ;# f[0] - mulx r14, r8, qword ptr [rsi + 8] - xor r15d, r15d - ;# f[1]*f[0] - mulx r10, r9, qword ptr [rsi + 16] - adcx r9, r14 - ;# f[2]*f[0] - mulx rcx, rax, qword ptr [rsi + 24] - adcx r10, rax - ;# f[3]*f[0] - mov rdx, qword ptr [rsi + 24] - ;# f[3] - mulx rbx, r11, qword ptr [rsi + 8] - adcx r11, rcx - ;# f[1]*f[3] - mulx r13, rax, qword ptr [rsi + 16] - adcx rbx, rax - ;# f[2]*f[3] - mov rdx, qword ptr [rsi + 8] - adcx r13, r15 - ;# f1 - mulx rcx, rax, qword ptr [rsi + 16] - mov r14, 0 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor r15d, r15d - adox r10, rax - adcx r8, r8 - adox r11, rcx - adcx r9, r9 - adox rbx, r15 - adcx r10, r10 - adox r13, r15 - adcx r11, r11 - adox r14, r15 - adcx rbx, rbx - adcx r13, r13 - adcx r14, r14 - - ;# Step 3: Compute intermediate squares - mov rdx, qword ptr [rsi + 0] - mulx rcx, rax, rdx - ;# f[0]^2 - mov qword ptr [rdi + 0], rax - - add r8, rcx - mov qword ptr [rdi + 8], r8 - - mov rdx, qword ptr [rsi + 8] - mulx rcx, rax, rdx - ;# f[1]^2 - adcx r9, rax - mov qword ptr [rdi + 16], r9 - - adcx r10, rcx - mov qword ptr [rdi + 24], r10 - - mov rdx, qword ptr [rsi + 16] - mulx rcx, rax, rdx - ;# f[2]^2 - adcx r11, rax - mov qword ptr [rdi + 32], r11 - - adcx rbx, rcx - mov qword ptr [rdi + 40], rbx - - mov rdx, qword ptr [rsi + 24] - mulx rcx, rax, rdx - ;# f[3]^2 - adcx r13, rax - mov qword ptr [rdi + 48], r13 - - adcx r14, rcx - mov qword ptr [rdi + 56], r14 - - - ;# Line up pointers - mov rsi, rdi - mov rdi, r12 - ;# Wrap the result back into the field - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov rdx, 38 - mulx r13, r8, qword ptr [rsi + 32] - xor ecx, ecx - adox r8, qword ptr [rsi + 0] - mulx rbx, r9, qword ptr [rsi + 40] - adcx r9, r13 - adox r9, qword ptr [rsi + 8] - mulx r13, r10, qword ptr [rsi + 48] - adcx r10, rbx - adox r10, qword ptr [rsi + 16] - mulx rax, r11, qword ptr [rsi + 56] - adcx r11, r13 - adox r11, qword ptr [rsi + 24] - adcx rax, rcx - adox rax, rcx - imul rax, rdx - - ;# Step 2: Fold the carry back into dst - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 8], r9 - adcx r10, rcx - mov qword ptr [rdi + 16], r10 - adcx r11, rcx - mov qword ptr [rdi + 24], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 0], r8 - pop rdi - pop rsi - pop rbx - pop r12 - pop r14 - pop r13 - pop r15 - ret -fsqr_e endp -ALIGN 16 -fsqr2_e proc - push r15 - push r13 - push r14 - push r12 - push rbx - push rsi - push rdi - mov rdi, rcx - mov 
rsi, rdx - mov r12, r8 - ;# Step 1: Compute all partial products - mov rdx, qword ptr [rsi + 0] - ;# f[0] - mulx r14, r8, qword ptr [rsi + 8] - xor r15d, r15d - ;# f[1]*f[0] - mulx r10, r9, qword ptr [rsi + 16] - adcx r9, r14 - ;# f[2]*f[0] - mulx rcx, rax, qword ptr [rsi + 24] - adcx r10, rax - ;# f[3]*f[0] - mov rdx, qword ptr [rsi + 24] - ;# f[3] - mulx rbx, r11, qword ptr [rsi + 8] - adcx r11, rcx - ;# f[1]*f[3] - mulx r13, rax, qword ptr [rsi + 16] - adcx rbx, rax - ;# f[2]*f[3] - mov rdx, qword ptr [rsi + 8] - adcx r13, r15 - ;# f1 - mulx rcx, rax, qword ptr [rsi + 16] - mov r14, 0 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor r15d, r15d - adox r10, rax - adcx r8, r8 - adox r11, rcx - adcx r9, r9 - adox rbx, r15 - adcx r10, r10 - adox r13, r15 - adcx r11, r11 - adox r14, r15 - adcx rbx, rbx - adcx r13, r13 - adcx r14, r14 - - ;# Step 3: Compute intermediate squares - mov rdx, qword ptr [rsi + 0] - mulx rcx, rax, rdx - ;# f[0]^2 - mov qword ptr [rdi + 0], rax - - add r8, rcx - mov qword ptr [rdi + 8], r8 - - mov rdx, qword ptr [rsi + 8] - mulx rcx, rax, rdx - ;# f[1]^2 - adcx r9, rax - mov qword ptr [rdi + 16], r9 - - adcx r10, rcx - mov qword ptr [rdi + 24], r10 - - mov rdx, qword ptr [rsi + 16] - mulx rcx, rax, rdx - ;# f[2]^2 - adcx r11, rax - mov qword ptr [rdi + 32], r11 - - adcx rbx, rcx - mov qword ptr [rdi + 40], rbx - - mov rdx, qword ptr [rsi + 24] - mulx rcx, rax, rdx - ;# f[3]^2 - adcx r13, rax - mov qword ptr [rdi + 48], r13 - - adcx r14, rcx - mov qword ptr [rdi + 56], r14 - - - ;# Step 1: Compute all partial products - mov rdx, qword ptr [rsi + 32] - ;# f[0] - mulx r14, r8, qword ptr [rsi + 40] - xor r15d, r15d - ;# f[1]*f[0] - mulx r10, r9, qword ptr [rsi + 48] - adcx r9, r14 - ;# f[2]*f[0] - mulx rcx, rax, qword ptr [rsi + 56] - adcx r10, rax - ;# f[3]*f[0] - mov rdx, qword ptr [rsi + 56] - ;# f[3] - mulx rbx, r11, qword ptr [rsi + 40] - adcx r11, rcx - ;# f[1]*f[3] - mulx r13, rax, qword ptr [rsi + 48] - adcx rbx, rax - ;# f[2]*f[3] - mov rdx, qword ptr [rsi + 40] - adcx r13, r15 - ;# f1 - mulx rcx, rax, qword ptr [rsi + 48] - mov r14, 0 - ;# f[2]*f[1] - - ;# Step 2: Compute two parallel carry chains - xor r15d, r15d - adox r10, rax - adcx r8, r8 - adox r11, rcx - adcx r9, r9 - adox rbx, r15 - adcx r10, r10 - adox r13, r15 - adcx r11, r11 - adox r14, r15 - adcx rbx, rbx - adcx r13, r13 - adcx r14, r14 - - ;# Step 3: Compute intermediate squares - mov rdx, qword ptr [rsi + 32] - mulx rcx, rax, rdx - ;# f[0]^2 - mov qword ptr [rdi + 64], rax - - add r8, rcx - mov qword ptr [rdi + 72], r8 - - mov rdx, qword ptr [rsi + 40] - mulx rcx, rax, rdx - ;# f[1]^2 - adcx r9, rax - mov qword ptr [rdi + 80], r9 - - adcx r10, rcx - mov qword ptr [rdi + 88], r10 - - mov rdx, qword ptr [rsi + 48] - mulx rcx, rax, rdx - ;# f[2]^2 - adcx r11, rax - mov qword ptr [rdi + 96], r11 - - adcx rbx, rcx - mov qword ptr [rdi + 104], rbx - - mov rdx, qword ptr [rsi + 56] - mulx rcx, rax, rdx - ;# f[3]^2 - adcx r13, rax - mov qword ptr [rdi + 112], r13 - - adcx r14, rcx - mov qword ptr [rdi + 120], r14 - - - ;# Line up pointers - mov rsi, rdi - mov rdi, r12 - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov rdx, 38 - mulx r13, r8, qword ptr [rsi + 32] - xor ecx, ecx - adox r8, qword ptr [rsi + 0] - mulx rbx, r9, qword ptr [rsi + 40] - adcx r9, r13 - adox r9, qword ptr [rsi + 8] - mulx r13, r10, qword ptr [rsi + 48] - adcx r10, rbx - adox r10, qword ptr [rsi + 16] - mulx rax, r11, qword ptr [rsi + 56] - adcx r11, r13 - adox r11, qword ptr [rsi + 24] - adcx rax, 
rcx - adox rax, rcx - imul rax, rdx - - ;# Step 2: Fold the carry back into dst - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 8], r9 - adcx r10, rcx - mov qword ptr [rdi + 16], r10 - adcx r11, rcx - mov qword ptr [rdi + 24], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 0], r8 - - ;# Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo - mov rdx, 38 - mulx r13, r8, qword ptr [rsi + 96] - xor ecx, ecx - adox r8, qword ptr [rsi + 64] - mulx rbx, r9, qword ptr [rsi + 104] - adcx r9, r13 - adox r9, qword ptr [rsi + 72] - mulx r13, r10, qword ptr [rsi + 112] - adcx r10, rbx - adox r10, qword ptr [rsi + 80] - mulx rax, r11, qword ptr [rsi + 120] - adcx r11, r13 - adox r11, qword ptr [rsi + 88] - adcx rax, rcx - adox rax, rcx - imul rax, rdx - - ;# Step 2: Fold the carry back into dst - add r8, rax - adcx r9, rcx - mov qword ptr [rdi + 40], r9 - adcx r10, rcx - mov qword ptr [rdi + 48], r10 - adcx r11, rcx - mov qword ptr [rdi + 56], r11 - - ;# Step 3: Fold the carry bit back in; guaranteed not to carry at this point - mov rax, 0 - cmovc rax, rdx - add r8, rax - mov qword ptr [rdi + 32], r8 - pop rdi - pop rsi - pop rbx - pop r12 - pop r14 - pop r13 - pop r15 - ret -fsqr2_e endp -ALIGN 16 -cswap2_e proc - push rdi - push rsi - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - ;# Transfer bit into CF flag - add rdi, 18446744073709551615 - - ;# cswap p1[0], p2[0] - mov r8, qword ptr [rsi + 0] - mov r9, qword ptr [rdx + 0] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 0], r8 - mov qword ptr [rdx + 0], r9 - - ;# cswap p1[1], p2[1] - mov r8, qword ptr [rsi + 8] - mov r9, qword ptr [rdx + 8] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 8], r8 - mov qword ptr [rdx + 8], r9 - - ;# cswap p1[2], p2[2] - mov r8, qword ptr [rsi + 16] - mov r9, qword ptr [rdx + 16] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 16], r8 - mov qword ptr [rdx + 16], r9 - - ;# cswap p1[3], p2[3] - mov r8, qword ptr [rsi + 24] - mov r9, qword ptr [rdx + 24] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 24], r8 - mov qword ptr [rdx + 24], r9 - - ;# cswap p1[4], p2[4] - mov r8, qword ptr [rsi + 32] - mov r9, qword ptr [rdx + 32] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 32], r8 - mov qword ptr [rdx + 32], r9 - - ;# cswap p1[5], p2[5] - mov r8, qword ptr [rsi + 40] - mov r9, qword ptr [rdx + 40] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 40], r8 - mov qword ptr [rdx + 40], r9 - - ;# cswap p1[6], p2[6] - mov r8, qword ptr [rsi + 48] - mov r9, qword ptr [rdx + 48] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 48], r8 - mov qword ptr [rdx + 48], r9 - - ;# cswap p1[7], p2[7] - mov r8, qword ptr [rsi + 56] - mov r9, qword ptr [rdx + 56] - mov r10, r8 - cmovc r8, r9 - cmovc r9, r10 - mov qword ptr [rsi + 56], r8 - mov qword ptr [rdx + 56], r9 - pop rsi - pop rdi - ret -cswap2_e endp -end
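Both Windows variants above wrap a single verified body in a small shim that saves rdi/rsi and moves the Microsoft x64 argument registers (rcx, rdx, r8, r9) into their System V counterparts, so the same Vale-generated code serves both calling conventions. The reduction that closes every routine (the recurring Step 1-3 comments) exploits the identity 2^256 ≡ 38 (mod 2^255 - 19): the high four limbs of a 512-bit intermediate are multiplied by 38 and folded into the low four, the leftover carry is folded again as carry*38, and the final carry bit is added back via a branch-free cmovc; the same cmovc idiom is what makes cswap2_e constant-time in the swap bit. Below is a minimal C sketch of that folding strategy. It assumes the unsigned __int128 GCC/Clang extension, keeps the result in the same redundant (< 2^256, not fully reduced) form as the assembly, and is an illustration rather than the verified Vale output:

    #include <stdint.h>

    /* Fold a 512-bit product tmp (8 little-endian 64-bit limbs) into
       4 limbs modulo 2^255 - 19, mirroring the Step 1-3 comments. */
    static void fold_reduce(uint64_t dst[4], const uint64_t tmp[8])
    {
      unsigned __int128 acc;
      uint64_t r[4];
      uint64_t c;
      int i;

      /* Step 1: dst + carry == tmp_hi * 38 + tmp_lo */
      c = 0U;
      for (i = 0; i < 4; i++) {
        acc = (unsigned __int128)tmp[4 + i] * 38U + tmp[i] + c;
        r[i] = (uint64_t)acc;
        c = (uint64_t)(acc >> 64);
      }

      /* Step 2: the remaining carry has weight 2^256, i.e. value c*38 */
      acc = (unsigned __int128)c * 38U + r[0];
      r[0] = (uint64_t)acc;
      c = (uint64_t)(acc >> 64);
      for (i = 1; i < 4; i++) {
        acc = (unsigned __int128)r[i] + c;
        r[i] = (uint64_t)acc;
        c = (uint64_t)(acc >> 64);
      }

      /* Step 3: at most one carry bit remains; folding 38 once more
         is guaranteed not to carry again, as the assembly notes. */
      r[0] = r[0] + c * 38U;
      for (i = 0; i < 4; i++) {
        dst[i] = r[i];
      }
    }

Keeping field elements below 2^256 rather than below 2^255 - 19 lets every multiplication defer the canonical reduction; a single final reduction is needed only when an element is serialized.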
diff --git a/dist/c89-compatible/evercrypt_targetconfig.h b/dist/c89-compatible/evercrypt_targetconfig.h deleted file mode 100644 index d6d7c032f2..0000000000 --- a/dist/c89-compatible/evercrypt_targetconfig.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef __EVERCRYPT_TARGETCONFIG_H -#define __EVERCRYPT_TARGETCONFIG_H - -// Instead of listing the identifiers for the target architectures -// then defining the constant TARGET_ARCHITECTURE in config.h, we might simply -// define exactly one tag of the form TARGET_ARCHITECTURE_IS_... in config.h. -// However, for maintenance purposes, we use the first method in -// order to have all the possible values listed in one place. -// Note that for now, the only important id is TARGET_ARCHITECTURE_ID_X64, -// but the other ids might prove useful in the future if we make -// the dynamic feature detection more precise (see the functions -// has_vec128_not_avx/has_vec256_not_avx2 below). -#define TARGET_ARCHITECTURE_ID_UNKNOWN 0 -#define TARGET_ARCHITECTURE_ID_X86 1 -#define TARGET_ARCHITECTURE_ID_X64 2 -#define TARGET_ARCHITECTURE_ID_ARM7 3 -#define TARGET_ARCHITECTURE_ID_ARM8 4 -#define TARGET_ARCHITECTURE_ID_SYSTEMZ 5 -#define TARGET_ARCHITECTURE_ID_POWERPC64 6 - -#if defined(__has_include) -#if __has_include("config.h") -#include "config.h" -#else -#define TARGET_ARCHITECTURE TARGET_ARCHITECTURE_ID_UNKNOWN -#endif -#endif - -// Those functions are called on non-x64 platforms for which the feature detection -// is not covered by vale's CPUID support; therefore, we hand-write them in C ourselves. -// For now, on non-x64 platforms, if we can compile 128-bit vector code, we can -// also execute it; this is true of: Z, Power, ARM8. In the future, if we consider -// cross-compilation scenarios, we'll have to refine this predicate; it could be the case, -// for instance, that we want our code to run on old revisions of a system without -// vector instructions, in which case we'll have to do run-time feature detection -// in addition to compile-time detection. - -#include <stdbool.h> - -static inline bool has_vec128_not_avx () { -#if (TARGET_ARCHITECTURE != TARGET_ARCHITECTURE_ID_X64) && HACL_CAN_COMPILE_VEC128 - return true; -#else - return false; -#endif -} - -static inline bool has_vec256_not_avx2 () { -#if (TARGET_ARCHITECTURE != TARGET_ARCHITECTURE_ID_X64) && HACL_CAN_COMPILE_VEC256 - return true; -#else - return false; -#endif -} - -#endif
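This header reduces target configuration to a compile-time architecture id plus two predicates: on non-x64 targets, the ability to compile 128- or 256-bit vector code is taken as the ability to run it, while x64 keeps its CPUID-based run-time detection. A hedged sketch of the resulting dispatch; EverCrypt_AutoConfig2_has_avx is the real run-time query, but the wrapper below is invented for illustration and is not part of the deleted header:

    #include <stdbool.h>
    #include "EverCrypt_AutoConfig2.h"
    #include "evercrypt_targetconfig.h"

    /* A 128-bit vector code path is usable when AVX was detected at
       run time (x64) or when the target compiles vector code
       unconditionally (ARM8, POWER, z). */
    static bool can_use_vec128(void)
    {
      return EverCrypt_AutoConfig2_has_avx() || has_vec128_not_avx();
    }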
diff --git a/dist/c89-compatible/internal/Hacl_Bignum.h b/dist/c89-compatible/internal/Hacl_Bignum.h deleted file mode 100644 index f4a7739a6a..0000000000 --- a/dist/c89-compatible/internal/Hacl_Bignum.h +++ /dev/null @@ -1,344 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __internal_Hacl_Bignum_H -#define __internal_Hacl_Bignum_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include <string.h> -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Bignum.h" -#include "evercrypt_targetconfig.h" -#include "lib_intrinsics.h" -void -Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32( - uint32_t aLen, - uint32_t *a, - uint32_t *b, - uint32_t *tmp, - uint32_t *res -); - -void -Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64( - uint32_t aLen, - uint64_t *a, - uint64_t *b, - uint64_t *tmp, - uint64_t *res -); - -void -Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32( - uint32_t aLen, - uint32_t *a, - uint32_t *tmp, - uint32_t *res -); - -void -Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64( - uint32_t aLen, - uint64_t *a, - uint64_t *tmp, - uint64_t *res -); - -void -Hacl_Bignum_bn_add_mod_n_u32( - uint32_t len1, - uint32_t *n, - uint32_t *a, - uint32_t *b, - uint32_t *res -); - -void -Hacl_Bignum_bn_add_mod_n_u64( - uint32_t len1, - uint64_t *n, - uint64_t *a, - uint64_t *b, - uint64_t *res -); - -void -Hacl_Bignum_bn_sub_mod_n_u32( - uint32_t len1, - uint32_t *n, - uint32_t *a, - uint32_t *b, - uint32_t *res -); - -void -Hacl_Bignum_bn_sub_mod_n_u64( - uint32_t len1, - uint64_t *n, - uint64_t *a, - uint64_t *b, - uint64_t *res -); - -uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0); - -uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0); - -uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n); - -void -Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32( - uint32_t len, - uint32_t nBits, - uint32_t *n, - uint32_t *res -); - -void -Hacl_Bignum_Montgomery_bn_mont_reduction_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv, - uint32_t *c, - uint32_t *res -); - -void -Hacl_Bignum_Montgomery_bn_to_mont_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv, - uint32_t *r2, - uint32_t *a, - uint32_t *aM -); - -void -Hacl_Bignum_Montgomery_bn_from_mont_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *a -); - -void -Hacl_Bignum_Montgomery_bn_mont_mul_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *bM, - uint32_t *resM -); - -void -Hacl_Bignum_Montgomery_bn_mont_sqr_u32( - uint32_t len, - uint32_t *n, - uint32_t nInv_u64, - uint32_t *aM, - uint32_t *resM -); - -uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n); - -void -Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64( - uint32_t len, - uint32_t nBits, - uint64_t *n, - uint64_t *res -); - -void -Hacl_Bignum_Montgomery_bn_mont_reduction_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv, - uint64_t *c, - uint64_t *res -); - -void -Hacl_Bignum_Montgomery_bn_to_mont_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv, - uint64_t *r2, - uint64_t *a, - uint64_t *aM -); - -void -Hacl_Bignum_Montgomery_bn_from_mont_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *a -); - -void -Hacl_Bignum_Montgomery_bn_mont_mul_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *bM, - uint64_t *resM -); - -void -Hacl_Bignum_Montgomery_bn_mont_sqr_u64( - uint32_t len, - uint64_t *n, - uint64_t nInv_u64, - uint64_t *aM, - uint64_t *resM -); - -uint32_t -Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32( - uint32_t len, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32( - uint32_t 
len, - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32( - uint32_t len, - uint32_t *n, - uint32_t mu, - uint32_t *r2, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32( - uint32_t len, - uint32_t nBits, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32( - uint32_t len, - uint32_t nBits, - uint32_t *n, - uint32_t *a, - uint32_t bBits, - uint32_t *b, - uint32_t *res -); - -uint64_t -Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64( - uint32_t len, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64( - uint32_t len, - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64( - uint32_t len, - uint64_t *n, - uint64_t mu, - uint64_t *r2, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64( - uint32_t len, - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -void -Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64( - uint32_t len, - uint32_t nBits, - uint64_t *n, - uint64_t *a, - uint32_t bBits, - uint64_t *b, - uint64_t *res -); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Bignum_H_DEFINED -#endif
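The internal bignum header above exposes the Karatsuba and Montgomery building blocks together with modular exponentiation in four flavors (vartime/consttime, with or without a precomputed r2); the consttime variants keep the memory access pattern independent of the exponent bits. A hedged usage sketch for a 2048-bit modulus; the wrapper name and limb count are illustrative, and callers must satisfy the F* preconditions (e.g. an odd modulus for the Montgomery-based paths and a < n):

    #include <stdint.h>
    #include "internal/Hacl_Bignum.h"

    /* res = a^b mod n for a 2048-bit (32-limb) modulus, with an
       exponent-independent access pattern. len counts 64-bit limbs;
       nBits is the bit size of the modulus. */
    static void modexp_2048_ct(uint64_t *res, uint64_t *n, uint64_t *a,
                               uint32_t bBits, uint64_t *b)
    {
      Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64(
        (uint32_t)32U, (uint32_t)2048U, n, a, bBits, b, res);
    }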
- */ - - -#ifndef __internal_Hacl_Chacha20_H -#define __internal_Hacl_Chacha20_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Chacha20.h" -#include "evercrypt_targetconfig.h" -extern const uint32_t Hacl_Impl_Chacha20_Vec_chacha20_constants[4U]; - -void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr); - -void -Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Chacha20_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Curve25519_51.h b/dist/c89-compatible/internal/Hacl_Curve25519_51.h deleted file mode 100644 index 4741bad22b..0000000000 --- a/dist/c89-compatible/internal/Hacl_Curve25519_51.h +++ /dev/null @@ -1,55 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
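For reference, a sketch of driving the internal ChaCha20 context deleted above; the 16-word state size, 32-byte key, and 12-byte IETF nonce are assumptions based on RFC 8439 rather than this header, and the demo_ name is illustrative (the public Hacl_Chacha20_chacha20_encrypt is the supported entry point):

#include <stdint.h>
#include "internal/Hacl_Chacha20.h"

void demo_chacha20(uint8_t key[32], uint8_t nonce[12],
                   uint8_t *plain, uint32_t len, uint8_t *cipher)
{
  uint32_t ctx[16] = { 0U };                             /* assumed state size */
  Hacl_Impl_Chacha20_chacha20_init(ctx, key, nonce, 1U); /* initial counter 1 */
  Hacl_Impl_Chacha20_chacha20_update(ctx, len, cipher, plain);
}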
- */ - - -#ifndef __internal_Hacl_Curve25519_51_H -#define __internal_Hacl_Curve25519_51_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Curve25519_51.h" -#include "evercrypt_targetconfig.h" -void -Hacl_Curve25519_51_fsquare_times( - uint64_t *o, - uint64_t *inp, - FStar_UInt128_uint128 *tmp, - uint32_t n -); - -void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tmp); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Curve25519_51_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Ed25519.h b/dist/c89-compatible/internal/Hacl_Ed25519.h deleted file mode 100644 index 6a76bb3201..0000000000 --- a/dist/c89-compatible/internal/Hacl_Ed25519.h +++ /dev/null @@ -1,71 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
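For reference, a sketch of the field-inversion helper deleted above, operating in GF(2^255-19) in the radix-2^51 representation; the 5-limb element size is inferred from the _51 naming, and the 10-element uint128 scratch size is an assumption not stated in this header:

#include <string.h>
#include <stdint.h>
#include "internal/Hacl_Curve25519_51.h"

void demo_finv(uint64_t out[5], uint64_t z[5])
{
  FStar_UInt128_uint128 tmp[10]; /* scratch space; size assumed */
  memset(tmp, 0, sizeof tmp);
  Hacl_Curve25519_51_finv(out, z, tmp); /* out = z^(p-2) mod p */
}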
- */ - - -#ifndef __internal_Hacl_Ed25519_H -#define __internal_Hacl_Ed25519_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "internal/Hacl_Hash_SHA2.h" -#include "internal/Hacl_Curve25519_51.h" -#include "../Hacl_Ed25519.h" -#include "evercrypt_targetconfig.h" -void Hacl_Bignum25519_reduce_513(uint64_t *a); - -void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a); - -void Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input); - -void Hacl_Bignum25519_store_51(uint8_t *output, uint64_t *input); - -void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p); - -bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *s); - -void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p); - -bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q); - -void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q); - -void Hacl_Impl_Ed25519_PointNegate_point_negate(uint64_t *p, uint64_t *out); - -void Hacl_Impl_Ed25519_Ladder_make_point_inf(uint64_t *b); - -void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *result, uint8_t *scalar, uint64_t *q); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Ed25519_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Frodo_KEM.h b/dist/c89-compatible/internal/Hacl_Frodo_KEM.h deleted file mode 100644 index f80ef3d8b0..0000000000 --- a/dist/c89-compatible/internal/Hacl_Frodo_KEM.h +++ /dev/null @@ -1,47 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
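For reference, a sketch of the 25519 serialization helpers deleted above: a 32-byte little-endian value round-trips through the 5-limb radix-2^51 representation (the limb count is inferred from the _51 suffix; the demo_ name is illustrative):

#include <stdint.h>
#include "internal/Hacl_Ed25519.h"

void demo_fe_roundtrip(uint8_t bytes[32])
{
  uint64_t fe[5] = { 0U };              /* radix-2^51 field element */
  Hacl_Bignum25519_load_51(fe, bytes);  /* bytes -> limbs */
  Hacl_Bignum25519_store_51(bytes, fe); /* limbs -> bytes */
}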
- */ - - -#ifndef __internal_Hacl_Frodo_KEM_H -#define __internal_Hacl_Frodo_KEM_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Frodo_KEM.h" -#include "evercrypt_targetconfig.h" -void randombytes_(uint32_t len, uint8_t *res); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Frodo_KEM_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_HMAC.h b/dist/c89-compatible/internal/Hacl_HMAC.h deleted file mode 100644 index f14ed8cdb2..0000000000 --- a/dist/c89-compatible/internal/Hacl_HMAC.h +++ /dev/null @@ -1,62 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
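For reference, randombytes_ above is declared but not defined here: the Frodo KEM code calls it for secret randomness, and every embedder must link an implementation. A sketch backed by the distribution's system RNG wrapper; the Lib_RandomBuffer_System_randombytes name, its (buf, len) argument order, and its boolean failure signal are assumptions about a sibling file, not taken from this header:

#include <stdint.h>
#include "krml/internal/target.h"
#include "Lib_RandomBuffer_System.h"

void randombytes_(uint32_t len, uint8_t *res)
{
  if (!Lib_RandomBuffer_System_randombytes(res, len))
  {
    KRML_HOST_EXIT(255U); /* RNG failure must not go unnoticed */
  }
}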
- */ - - -#ifndef __internal_Hacl_HMAC_H -#define __internal_Hacl_HMAC_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "internal/Hacl_Hash_SHA2.h" -#include "internal/Hacl_Hash_SHA1.h" -#include "internal/Hacl_Hash_Blake2.h" -#include "../Hacl_HMAC.h" -#include "evercrypt_targetconfig.h" -typedef struct K____uint32_t__uint64_t_s -{ - uint32_t *fst; - uint64_t snd; -} -K____uint32_t__uint64_t; - -typedef struct K____uint64_t__FStar_UInt128_uint128_s -{ - uint64_t *fst; - FStar_UInt128_uint128 snd; -} -K____uint64_t__FStar_UInt128_uint128; - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_HMAC_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Hash_Blake2.h b/dist/c89-compatible/internal/Hacl_Hash_Blake2.h deleted file mode 100644 index 93e0119de5..0000000000 --- a/dist/c89-compatible/internal/Hacl_Hash_Blake2.h +++ /dev/null @@ -1,126 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __internal_Hacl_Hash_Blake2_H -#define __internal_Hacl_Hash_Blake2_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Hash_Blake2.h" -#include "evercrypt_targetconfig.h" -uint64_t Hacl_Hash_Core_Blake2_init_blake2s_32(uint32_t *s); - -uint64_t Hacl_Hash_Core_Blake2_update_blake2s_32(uint32_t *s, uint64_t totlen, uint8_t *block); - -void Hacl_Hash_Core_Blake2_finish_blake2s_32(uint32_t *s, uint64_t ev, uint8_t *dst); - -FStar_UInt128_uint128 Hacl_Hash_Core_Blake2_init_blake2b_32(uint64_t *s); - -FStar_UInt128_uint128 -Hacl_Hash_Core_Blake2_update_blake2b_32( - uint64_t *s, - FStar_UInt128_uint128 totlen, - uint8_t *block -); - -void -Hacl_Hash_Core_Blake2_finish_blake2b_32(uint64_t *s, FStar_UInt128_uint128 ev, uint8_t *dst); - -uint64_t -Hacl_Hash_Blake2_update_multi_blake2s_32( - uint32_t *s, - uint64_t ev, - uint8_t *blocks, - uint32_t n_blocks -); - -FStar_UInt128_uint128 -Hacl_Hash_Blake2_update_multi_blake2b_32( - uint64_t *s, - FStar_UInt128_uint128 ev, - uint8_t *blocks, - uint32_t n_blocks -); - -typedef struct K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t__s -{ - uint32_t fst; - uint32_t snd; - uint32_t thd; - uint8_t *f3; - uint8_t *f4; -} -K___uint32_t_uint32_t_uint32_t__uint8_t___uint8_t_; - -typedef struct K___uint32_t_uint32_t_uint32_t_s -{ - uint32_t fst; - uint32_t snd; - uint32_t thd; -} -K___uint32_t_uint32_t_uint32_t; - -uint64_t -Hacl_Hash_Blake2_update_last_blake2s_32( - uint32_t *s, - uint64_t ev, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -); - -FStar_UInt128_uint128 -Hacl_Hash_Blake2_update_last_blake2b_32( - uint64_t *s, - FStar_UInt128_uint128 ev, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -); - -void Hacl_Hash_Blake2_hash_blake2s_32(uint8_t *input, uint32_t input_len, uint8_t *dst); - -void Hacl_Hash_Blake2_hash_blake2b_32(uint8_t *input, uint32_t input_len, uint8_t *dst); - -typedef struct K___uint32_t_uint32_t_s -{ - uint32_t fst; - uint32_t snd; -} -K___uint32_t_uint32_t; - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Hash_Blake2_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Hash_Blake2b_256.h b/dist/c89-compatible/internal/Hacl_Hash_Blake2b_256.h deleted file mode 100644 index 36e76a9955..0000000000 --- a/dist/c89-compatible/internal/Hacl_Hash_Blake2b_256.h +++ /dev/null @@ -1,85 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
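For reference, a sketch of the one-shot hashes deleted above; the 64-byte BLAKE2b and 32-byte BLAKE2s digest lengths are the standard defaults, and the demo_ name is illustrative:

#include <stdint.h>
#include "internal/Hacl_Hash_Blake2.h"

void demo_blake2(uint8_t *msg, uint32_t msg_len)
{
  uint8_t d64[64]; /* BLAKE2b-512 digest */
  uint8_t d32[32]; /* BLAKE2s-256 digest */
  Hacl_Hash_Blake2_hash_blake2b_32(msg, msg_len, d64);
  Hacl_Hash_Blake2_hash_blake2s_32(msg, msg_len, d32);
}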
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __internal_Hacl_Hash_Blake2b_256_H -#define __internal_Hacl_Hash_Blake2b_256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "internal/Hacl_Hash_Blake2.h" -#include "../Hacl_Hash_Blake2b_256.h" -#include "evercrypt_targetconfig.h" -#include "libintvector.h" -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_init_blake2b_256(Lib_IntVector_Intrinsics_vec256 *s); - -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_update_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 totlen, - uint8_t *block -); - -void -Hacl_Hash_Blake2b_256_finish_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 ev, - uint8_t *dst -); - -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_update_multi_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 ev, - uint8_t *blocks, - uint32_t n_blocks -); - -FStar_UInt128_uint128 -Hacl_Hash_Blake2b_256_update_last_blake2b_256( - Lib_IntVector_Intrinsics_vec256 *s, - FStar_UInt128_uint128 ev, - FStar_UInt128_uint128 prev_len, - uint8_t *input, - uint32_t input_len -); - -void Hacl_Hash_Blake2b_256_hash_blake2b_256(uint8_t *input, uint32_t input_len, uint8_t *dst); - -Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_256_malloc_blake2b_256(); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Hash_Blake2b_256_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Hash_Blake2s_128.h b/dist/c89-compatible/internal/Hacl_Hash_Blake2s_128.h deleted file mode 100644 index e1c62c085c..0000000000 --- a/dist/c89-compatible/internal/Hacl_Hash_Blake2s_128.h +++ /dev/null @@ -1,84 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __internal_Hacl_Hash_Blake2s_128_H -#define __internal_Hacl_Hash_Blake2s_128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "internal/Hacl_Hash_Blake2.h" -#include "../Hacl_Hash_Blake2s_128.h" -#include "evercrypt_targetconfig.h" -#include "libintvector.h" -uint64_t Hacl_Hash_Blake2s_128_init_blake2s_128(Lib_IntVector_Intrinsics_vec128 *s); - -uint64_t -Hacl_Hash_Blake2s_128_update_blake2s_128( - Lib_IntVector_Intrinsics_vec128 *s, - uint64_t totlen, - uint8_t *block -); - -void -Hacl_Hash_Blake2s_128_finish_blake2s_128( - Lib_IntVector_Intrinsics_vec128 *s, - uint64_t ev, - uint8_t *dst -); - -uint64_t -Hacl_Hash_Blake2s_128_update_multi_blake2s_128( - Lib_IntVector_Intrinsics_vec128 *s, - uint64_t ev, - uint8_t *blocks, - uint32_t n_blocks -); - -uint64_t -Hacl_Hash_Blake2s_128_update_last_blake2s_128( - Lib_IntVector_Intrinsics_vec128 *s, - uint64_t ev, - uint64_t prev_len, - uint8_t *input, - uint32_t input_len -); - -void Hacl_Hash_Blake2s_128_hash_blake2s_128(uint8_t *input, uint32_t input_len, uint8_t *dst); - -Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_128_malloc_blake2s_128(); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Hash_Blake2s_128_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Hash_MD5.h b/dist/c89-compatible/internal/Hacl_Hash_MD5.h deleted file mode 100644 index 2324cf8b16..0000000000 --- a/dist/c89-compatible/internal/Hacl_Hash_MD5.h +++ /dev/null @@ -1,51 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __internal_Hacl_Hash_MD5_H -#define __internal_Hacl_Hash_MD5_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Hash_MD5.h" -#include "evercrypt_targetconfig.h" -void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s); - -void Hacl_Hash_Core_MD5_legacy_update(uint32_t *abcd, uint8_t *x); - -void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Hash_MD5_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Hash_SHA1.h b/dist/c89-compatible/internal/Hacl_Hash_SHA1.h deleted file mode 100644 index b512358c8c..0000000000 --- a/dist/c89-compatible/internal/Hacl_Hash_SHA1.h +++ /dev/null @@ -1,51 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __internal_Hacl_Hash_SHA1_H -#define __internal_Hacl_Hash_SHA1_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Hash_SHA1.h" -#include "evercrypt_targetconfig.h" -void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s); - -void Hacl_Hash_Core_SHA1_legacy_update(uint32_t *h, uint8_t *l); - -void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Hash_SHA1_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Hash_SHA2.h b/dist/c89-compatible/internal/Hacl_Hash_SHA2.h deleted file mode 100644 index c72f0f9f82..0000000000 --- a/dist/c89-compatible/internal/Hacl_Hash_SHA2.h +++ /dev/null @@ -1,67 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __internal_Hacl_Hash_SHA2_H -#define __internal_Hacl_Hash_SHA2_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Hash_SHA2.h" -#include "evercrypt_targetconfig.h" -void Hacl_Hash_Core_SHA2_init_224(uint32_t *s); - -void Hacl_Hash_Core_SHA2_init_256(uint32_t *s); - -void Hacl_Hash_Core_SHA2_init_384(uint64_t *s); - -void Hacl_Hash_Core_SHA2_init_512(uint64_t *s); - -void Hacl_Hash_Core_SHA2_update_384(uint64_t *hash, uint8_t *block); - -void Hacl_Hash_Core_SHA2_update_512(uint64_t *hash, uint8_t *block); - -void Hacl_Hash_Core_SHA2_pad_256(uint64_t len, uint8_t *dst); - -void Hacl_Hash_Core_SHA2_finish_224(uint32_t *s, uint8_t *dst); - -void Hacl_Hash_Core_SHA2_finish_256(uint32_t *s, uint8_t *dst); - -void Hacl_Hash_Core_SHA2_finish_384(uint64_t *s, uint8_t *dst); - -void Hacl_Hash_Core_SHA2_finish_512(uint64_t *s, uint8_t *dst); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Hash_SHA2_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_K256_ECDSA.h b/dist/c89-compatible/internal/Hacl_K256_ECDSA.h deleted file mode 100644 index 5879328729..0000000000 --- a/dist/c89-compatible/internal/Hacl_K256_ECDSA.h +++ /dev/null @@ -1,62 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
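For reference, a sketch of the core SHA-2 state handling deleted above: 8 x 32-bit words for SHA-224/256 and 8 x 64-bit words for SHA-384/512 (the standard state sizes), with pad_256 emitting the final Merkle-Damgard padding for a message of len bytes. The 128-byte bound on the padding buffer is a safe over-approximation, not taken from this header:

#include <stdint.h>
#include "internal/Hacl_Hash_SHA2.h"

void demo_sha2_core(void)
{
  uint32_t st256[8];
  uint64_t st512[8];
  uint8_t pad[128]; /* SHA-256 padding is at most 72 bytes */
  Hacl_Hash_Core_SHA2_init_256(st256);
  Hacl_Hash_Core_SHA2_init_512(st512);
  Hacl_Hash_Core_SHA2_pad_256((uint64_t)3U, pad); /* padding for a 3-byte message */
}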
- */ - - -#ifndef __internal_Hacl_K256_ECDSA_H -#define __internal_Hacl_K256_ECDSA_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_K256_ECDSA.h" -#include "evercrypt_targetconfig.h" -#include "lib_intrinsics.h" -bool Hacl_Impl_K256_Point_aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_t *s); - -void Hacl_Impl_K256_Point_aff_point_compress_vartime(uint8_t *s, uint64_t *x, uint64_t *y); - -void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p); - -bool Hacl_Impl_K256_Point_point_eq(uint64_t *p, uint64_t *q); - -void Hacl_Impl_K256_PointDouble_point_double(uint64_t *out, uint64_t *p); - -void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q); - -void Hacl_Impl_K256_PointMul_make_point_at_inf(uint64_t *p); - -void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t *q); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_K256_ECDSA_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_P256.h b/dist/c89-compatible/internal/Hacl_P256.h deleted file mode 100644 index 93c1870662..0000000000 --- a/dist/c89-compatible/internal/Hacl_P256.h +++ /dev/null @@ -1,64 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __internal_Hacl_P256_H -#define __internal_Hacl_P256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "internal/Hacl_Spec.h" -#include "../Hacl_P256.h" -#include "evercrypt_targetconfig.h" -#include "lib_intrinsics.h" -void Hacl_Impl_P256_LowLevel_toUint8(uint64_t *i, uint8_t *o); - -void Hacl_Impl_P256_LowLevel_changeEndian(uint64_t *i); - -void Hacl_Impl_P256_LowLevel_toUint64ChangeEndian(uint8_t *i, uint64_t *o); - -uint64_t Hacl_Impl_P256_Core_isPointAtInfinityPrivate(uint64_t *p); - -void -Hacl_Impl_P256_Core_secretToPublic(uint64_t *result, uint8_t *scalar, uint64_t *tempBuffer); - -/** - The pub(lic)_key input of the function is considered to be public, - thus this code is not secret independent with respect to the operations done over this variable. 
-*/ -uint64_t Hacl_Impl_P256_DH__ecp256dh_r(uint64_t *result, uint64_t *pubKey, uint8_t *scalar); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_P256_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Poly1305_128.h b/dist/c89-compatible/internal/Hacl_Poly1305_128.h deleted file mode 100644 index 1a8f0eb779..0000000000 --- a/dist/c89-compatible/internal/Hacl_Poly1305_128.h +++ /dev/null @@ -1,55 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __internal_Hacl_Poly1305_128_H -#define __internal_Hacl_Poly1305_128_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Poly1305_128.h" -#include "evercrypt_targetconfig.h" -#include "libintvector.h" -void -Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b); - -void -Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize( - Lib_IntVector_Intrinsics_vec128 *out, - Lib_IntVector_Intrinsics_vec128 *p -); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Poly1305_128_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Poly1305_256.h b/dist/c89-compatible/internal/Hacl_Poly1305_256.h deleted file mode 100644 index f70cc5ac79..0000000000 --- a/dist/c89-compatible/internal/Hacl_Poly1305_256.h +++ /dev/null @@ -1,55 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - - -#ifndef __internal_Hacl_Poly1305_256_H -#define __internal_Hacl_Poly1305_256_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Poly1305_256.h" -#include "evercrypt_targetconfig.h" -#include "libintvector.h" -void -Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b); - -void -Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize( - Lib_IntVector_Intrinsics_vec256 *out, - Lib_IntVector_Intrinsics_vec256 *p -); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Poly1305_256_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_SHA2_Types.h b/dist/c89-compatible/internal/Hacl_SHA2_Types.h deleted file mode 100644 index f0083f7c37..0000000000 --- a/dist/c89-compatible/internal/Hacl_SHA2_Types.h +++ /dev/null @@ -1,108 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - - -#ifndef __internal_Hacl_SHA2_Types_H -#define __internal_Hacl_SHA2_Types_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_SHA2_Types.h" -#include "evercrypt_targetconfig.h" -typedef struct Hacl_Impl_SHA2_Types_uint8_2p_s -{ - uint8_t *fst; - uint8_t *snd; -} -Hacl_Impl_SHA2_Types_uint8_2p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_3p_s -{ - uint8_t *fst; - Hacl_Impl_SHA2_Types_uint8_2p snd; -} -Hacl_Impl_SHA2_Types_uint8_3p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_4p_s -{ - uint8_t *fst; - Hacl_Impl_SHA2_Types_uint8_3p snd; -} -Hacl_Impl_SHA2_Types_uint8_4p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_5p_s -{ - uint8_t *fst; - Hacl_Impl_SHA2_Types_uint8_4p snd; -} -Hacl_Impl_SHA2_Types_uint8_5p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_6p_s -{ - uint8_t *fst; - Hacl_Impl_SHA2_Types_uint8_5p snd; -} -Hacl_Impl_SHA2_Types_uint8_6p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_7p_s -{ - uint8_t *fst; - Hacl_Impl_SHA2_Types_uint8_6p snd; -} -Hacl_Impl_SHA2_Types_uint8_7p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_8p_s -{ - uint8_t *fst; - Hacl_Impl_SHA2_Types_uint8_7p snd; -} -Hacl_Impl_SHA2_Types_uint8_8p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_2x4p_s -{ - Hacl_Impl_SHA2_Types_uint8_4p fst; - Hacl_Impl_SHA2_Types_uint8_4p snd; -} -Hacl_Impl_SHA2_Types_uint8_2x4p; - -typedef struct Hacl_Impl_SHA2_Types_uint8_2x8p_s -{ - Hacl_Impl_SHA2_Types_uint8_8p fst; - Hacl_Impl_SHA2_Types_uint8_8p snd; -} -Hacl_Impl_SHA2_Types_uint8_2x8p; - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_SHA2_Types_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Hacl_Spec.h b/dist/c89-compatible/internal/Hacl_Spec.h deleted file mode 100644 index 1a4a51dc40..0000000000 --- a/dist/c89-compatible/internal/Hacl_Spec.h +++ /dev/null @@ -1,63 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
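For reference, the uint8_Np types deleted above are right-nested pointer tuples, so a 4-way input for the multi-buffer SHA-2 code is assembled innermost-first; the mk_ helper name is illustrative:

#include <stdint.h>
#include "internal/Hacl_SHA2_Types.h"

Hacl_Impl_SHA2_Types_uint8_4p
mk_uint8_4p(uint8_t *b0, uint8_t *b1, uint8_t *b2, uint8_t *b3)
{
  Hacl_Impl_SHA2_Types_uint8_2p p2;
  Hacl_Impl_SHA2_Types_uint8_3p p3;
  Hacl_Impl_SHA2_Types_uint8_4p p4;
  p2.fst = b2; p2.snd = b3; /* innermost pair */
  p3.fst = b1; p3.snd = p2;
  p4.fst = b0; p4.snd = p3;
  return p4;
}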
- */ - - -#ifndef __internal_Hacl_Spec_H -#define __internal_Hacl_Spec_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - -#include "../Hacl_Spec.h" -#include "evercrypt_targetconfig.h" -#define Spec_ECDSA_NoHash 0 -#define Spec_ECDSA_Hash 1 - -typedef uint8_t Spec_ECDSA_hash_alg_ecdsa_tags; - -typedef struct Spec_ECDSA_hash_alg_ecdsa_s -{ - Spec_ECDSA_hash_alg_ecdsa_tags tag; - Spec_Hash_Definitions_hash_alg _0; -} -Spec_ECDSA_hash_alg_ecdsa; - -#define Spec_Cipher_Expansion_Hacl_CHACHA20 0 -#define Spec_Cipher_Expansion_Vale_AES128 1 -#define Spec_Cipher_Expansion_Vale_AES256 2 - -typedef uint8_t Spec_Cipher_Expansion_impl; - -#if defined(__cplusplus) -} -#endif - -#define __internal_Hacl_Spec_H_DEFINED -#endif diff --git a/dist/c89-compatible/internal/Vale.h b/dist/c89-compatible/internal/Vale.h deleted file mode 100644 index 105ac865d6..0000000000 --- a/dist/c89-compatible/internal/Vale.h +++ /dev/null @@ -1,193 +0,0 @@ -/* MIT License - * - * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
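For reference, a C89-style construction of the tagged union deleted above; Spec_ECDSA_Hash and the _0 payload field come from this header, while the Spec_Hash_Definitions_SHA2_256 constant is assumed to be provided by the public Hacl_Spec.h it includes:

#include "internal/Hacl_Spec.h"

Spec_ECDSA_hash_alg_ecdsa demo_ecdsa_sha256(void)
{
  Spec_ECDSA_hash_alg_ecdsa h;
  h.tag = Spec_ECDSA_Hash;
  h._0 = Spec_Hash_Definitions_SHA2_256; /* payload meaningful only when tag == Hash */
  return h;
}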
- */ - - -#ifndef __internal_Vale_H -#define __internal_Vale_H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include -#include "krml/internal/types.h" -#include "krml/lowstar_endianness.h" -#include "krml/internal/target.h" - - - -#include "evercrypt_targetconfig.h" -extern uint64_t add_scalar_e(uint64_t *x0, uint64_t *x1, uint64_t x2); - -extern uint64_t fadd_e(uint64_t *x0, uint64_t *x1, uint64_t *x2); - -extern uint64_t sha256_update(uint32_t *x0, uint8_t *x1, uint64_t x2, uint32_t *x3); - -extern uint64_t x64_poly1305(uint8_t *x0, uint8_t *x1, uint64_t x2, uint64_t x3); - -extern uint64_t check_aesni(); - -extern uint64_t check_sha(); - -extern uint64_t check_adx_bmi2(); - -extern uint64_t check_avx(); - -extern uint64_t check_avx2(); - -extern uint64_t check_movbe(); - -extern uint64_t check_sse(); - -extern uint64_t check_rdrand(); - -extern uint64_t check_avx512(); - -extern uint64_t check_osxsave(); - -extern uint64_t check_avx_xcr0(); - -extern uint64_t check_avx512_xcr0(); - -extern uint64_t -gcm128_decrypt_opt( - uint8_t *x0, - uint64_t x1, - uint64_t x2, - uint8_t *x3, - uint8_t *x4, - uint8_t *x5, - uint8_t *x6, - uint8_t *x7, - uint8_t *x8, - uint64_t x9, - uint8_t *x10, - uint8_t *x11, - uint64_t x12, - uint8_t *x13, - uint64_t x14, - uint8_t *x15, - uint8_t *x16 -); - -extern uint64_t -gcm256_decrypt_opt( - uint8_t *x0, - uint64_t x1, - uint64_t x2, - uint8_t *x3, - uint8_t *x4, - uint8_t *x5, - uint8_t *x6, - uint8_t *x7, - uint8_t *x8, - uint64_t x9, - uint8_t *x10, - uint8_t *x11, - uint64_t x12, - uint8_t *x13, - uint64_t x14, - uint8_t *x15, - uint8_t *x16 -); - -extern uint64_t aes128_key_expansion(uint8_t *x0, uint8_t *x1); - -extern uint64_t aes256_key_expansion(uint8_t *x0, uint8_t *x1); - -extern uint64_t -compute_iv_stdcall( - uint8_t *x0, - uint64_t x1, - uint64_t x2, - uint8_t *x3, - uint8_t *x4, - uint8_t *x5 -); - -extern uint64_t -gcm128_encrypt_opt( - uint8_t *x0, - uint64_t x1, - uint64_t x2, - uint8_t *x3, - uint8_t *x4, - uint8_t *x5, - uint8_t *x6, - uint8_t *x7, - uint8_t *x8, - uint64_t x9, - uint8_t *x10, - uint8_t *x11, - uint64_t x12, - uint8_t *x13, - uint64_t x14, - uint8_t *x15, - uint8_t *x16 -); - -extern uint64_t -gcm256_encrypt_opt( - uint8_t *x0, - uint64_t x1, - uint64_t x2, - uint8_t *x3, - uint8_t *x4, - uint8_t *x5, - uint8_t *x6, - uint8_t *x7, - uint8_t *x8, - uint64_t x9, - uint8_t *x10, - uint8_t *x11, - uint64_t x12, - uint8_t *x13, - uint64_t x14, - uint8_t *x15, - uint8_t *x16 -); - -extern uint64_t aes128_keyhash_init(uint8_t *x0, uint8_t *x1); - -extern uint64_t aes256_keyhash_init(uint8_t *x0, uint8_t *x1); - -extern uint64_t cswap2_e(uint64_t x0, uint64_t *x1, uint64_t *x2); - -extern uint64_t fsqr_e(uint64_t *x0, uint64_t *x1, uint64_t *x2); - -extern uint64_t fsqr2_e(uint64_t *x0, uint64_t *x1, uint64_t *x2); - -extern uint64_t fmul_e(uint64_t *x0, uint64_t *x1, uint64_t *x2, uint64_t *x3); - -extern uint64_t fmul2_e(uint64_t *x0, uint64_t *x1, uint64_t *x2, uint64_t *x3); - -extern uint64_t fmul_scalar_e(uint64_t *x0, uint64_t *x1, uint64_t x2); - -extern uint64_t fsub_e(uint64_t *x0, uint64_t *x1, uint64_t *x2); - -#if defined(__cplusplus) -} -#endif - -#define __internal_Vale_H_DEFINED -#endif diff --git a/dist/c89-compatible/lib_intrinsics.h b/dist/c89-compatible/lib_intrinsics.h deleted file mode 100644 index 0c35026e89..0000000000 --- a/dist/c89-compatible/lib_intrinsics.h +++ /dev/null @@ -1,83 +0,0 @@ -#pragma once - -#include - -#if defined(__has_include) -#if __has_include("config.h") -#include "config.h" 
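For reference, the check_* probes deleted above are CPUID wrappers implemented in Vale assembly. A sketch of gating on them, assuming a nonzero return means the feature is present (an assumption; EverCrypt_AutoConfig2 is the supported detection path, and its has_* queries should be preferred):

#include <stdint.h>
#include "internal/Vale.h"

int demo_have_aesni_and_sse(void)
{
  return check_aesni() != (uint64_t)0U && check_sse() != (uint64_t)0U;
}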
-#endif -#endif - -#if defined(HACL_CAN_COMPILE_INTRINSICS) -#if defined(_MSC_VER) -#include -#else -#include -#endif -#endif - -#if !defined(HACL_CAN_COMPILE_INTRINSICS) - -#include "Hacl_IntTypes_Intrinsics.h" - -#if defined(HACL_CAN_COMPILE_UINT128) - -#include "Hacl_IntTypes_Intrinsics_128.h" - -#define Lib_IntTypes_Intrinsics_add_carry_u64(x1, x2, x3, x4) \ - (Hacl_IntTypes_Intrinsics_128_add_carry_u64(x1, x2, x3, x4)) - -#define Lib_IntTypes_Intrinsics_sub_borrow_u64(x1, x2, x3, x4) \ - (Hacl_IntTypes_Intrinsics_128_sub_borrow_u64(x1, x2, x3, x4)) - -#else - -#define Lib_IntTypes_Intrinsics_add_carry_u64(x1, x2, x3, x4) \ - (Hacl_IntTypes_Intrinsics_add_carry_u64(x1, x2, x3, x4)) - -#define Lib_IntTypes_Intrinsics_sub_borrow_u64(x1, x2, x3, x4) \ - (Hacl_IntTypes_Intrinsics_sub_borrow_u64(x1, x2, x3, x4)) - -#endif // defined(HACL_CAN_COMPILE_UINT128) - -#define Lib_IntTypes_Intrinsics_add_carry_u32(x1, x2, x3, x4) \ - (Hacl_IntTypes_Intrinsics_add_carry_u32(x1, x2, x3, x4)) - -#define Lib_IntTypes_Intrinsics_sub_borrow_u32(x1, x2, x3, x4) \ - (Hacl_IntTypes_Intrinsics_sub_borrow_u32(x1, x2, x3, x4)) - -#else // !defined(HACL_CAN_COMPILE_INTRINSICS) - -#define Lib_IntTypes_Intrinsics_add_carry_u32(x1, x2, x3, x4) \ - (_addcarry_u32(x1, x2, x3, (unsigned int *) x4)) - -#define Lib_IntTypes_Intrinsics_add_carry_u64(x1, x2, x3, x4) \ - (_addcarry_u64(x1, x2, x3, (long long unsigned int *) x4)) - - -/* - GCC versions prior to 7.2 pass arguments to _subborrow_u{32,64} - in an incorrect order. - - See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81294 -*/ -#if defined(__GNUC__) && !defined (__clang__) && \ - (__GNUC__ < 7 || (__GNUC__ == 7 && (__GNUC_MINOR__ < 2))) - -#define Lib_IntTypes_Intrinsics_sub_borrow_u32(x1, x2, x3, x4) \ - (_subborrow_u32(x1, x3, x2, (unsigned int *) x4)) - -#define Lib_IntTypes_Intrinsics_sub_borrow_u64(x1, x2, x3, x4) \ - (_subborrow_u64(x1, x3, x2, (long long unsigned int *) x4)) - -#else - -#define Lib_IntTypes_Intrinsics_sub_borrow_u32(x1, x2, x3, x4) \ - (_subborrow_u32(x1, x2, x3, (unsigned int *) x4)) - -#define Lib_IntTypes_Intrinsics_sub_borrow_u64(x1, x2, x3, x4) \ - (_subborrow_u64(x1, x2, x3, (long long unsigned int *) x4)) - -#endif // GCC < 7.2 - -#endif // !HACL_CAN_COMPILE_INTRINSICS diff --git a/dist/c89-compatible/libevercrypt.def b/dist/c89-compatible/libevercrypt.def deleted file mode 100644 index f698f813db..0000000000 --- a/dist/c89-compatible/libevercrypt.def +++ /dev/null @@ -1,881 +0,0 @@ -LIBRARY libevercrypt - -EXPORTS - Hacl_Hash_Definitions_word_len - Hacl_Hash_Definitions_block_len - Hacl_Hash_Definitions_hash_word_len - Hacl_Hash_Definitions_hash_len - Hacl_Hash_Core_Blake2_init_blake2s_32 - Hacl_Hash_Core_Blake2_update_blake2s_32 - Hacl_Hash_Core_Blake2_finish_blake2s_32 - Hacl_Hash_Core_Blake2_init_blake2b_32 - Hacl_Hash_Core_Blake2_update_blake2b_32 - Hacl_Hash_Core_Blake2_finish_blake2b_32 - Hacl_Hash_Blake2_update_multi_blake2s_32 - Hacl_Hash_Blake2_update_multi_blake2b_32 - Hacl_Hash_Blake2_update_last_blake2s_32 - Hacl_Hash_Blake2_update_last_blake2b_32 - Hacl_Hash_Blake2_hash_blake2s_32 - Hacl_Hash_Blake2_hash_blake2b_32 - Hacl_Blake2b_32_blake2b_init - Hacl_Blake2b_32_blake2b_update_key - Hacl_Blake2b_32_blake2b_update_multi - Hacl_Blake2b_32_blake2b_update_last - Hacl_Blake2b_32_blake2b_finish - Hacl_Blake2b_32_blake2b - Hacl_Blake2s_32_blake2s_init - Hacl_Blake2s_32_blake2s_update_key - Hacl_Blake2s_32_blake2s_update_multi - Hacl_Blake2s_32_blake2s_update_last - Hacl_Blake2s_32_blake2s_finish - Hacl_Blake2s_32_blake2s - 
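For reference, both expansions of the add-carry/sub-borrow macros deleted above share the (carry_in, x, y, &result) -> carry_out convention, which is exactly what the GCC < 7.2 argument-order workaround preserves. A sketch of a two-limb addition; treating the returned carry as 0 or 1 is an assumption consistent with both backends, and the demo_ name is illustrative:

#include <stdint.h>
#include "lib_intrinsics.h"

uint64_t demo_add2(uint64_t a[2], uint64_t b[2], uint64_t res[2])
{
  uint64_t c;
  c = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, a[0], b[0], res);
  c = Lib_IntTypes_Intrinsics_add_carry_u64(c, a[1], b[1], res + 1);
  return c; /* carry out of the top limb */
}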
Hacl_Hash_Blake2b_256_init_blake2b_256 - Hacl_Hash_Blake2b_256_update_blake2b_256 - Hacl_Hash_Blake2b_256_finish_blake2b_256 - Hacl_Hash_Blake2b_256_update_multi_blake2b_256 - Hacl_Hash_Blake2b_256_update_last_blake2b_256 - Hacl_Hash_Blake2b_256_hash_blake2b_256 - Hacl_Hash_Blake2b_256_malloc_blake2b_256 - Hacl_Blake2b_256_blake2b_init - Hacl_Blake2b_256_blake2b_update_key - Hacl_Blake2b_256_blake2b_update_multi - Hacl_Blake2b_256_blake2b_update_last - Hacl_Blake2b_256_blake2b_finish - Hacl_Blake2b_256_blake2b - Hacl_Blake2b_256_load_state256b_from_state32 - Hacl_Blake2b_256_store_state256b_to_state32 - Hacl_Hash_Blake2s_128_init_blake2s_128 - Hacl_Hash_Blake2s_128_update_blake2s_128 - Hacl_Hash_Blake2s_128_finish_blake2s_128 - Hacl_Hash_Blake2s_128_update_multi_blake2s_128 - Hacl_Hash_Blake2s_128_update_last_blake2s_128 - Hacl_Hash_Blake2s_128_hash_blake2s_128 - Hacl_Hash_Blake2s_128_malloc_blake2s_128 - Hacl_Blake2s_128_blake2s_init - Hacl_Blake2s_128_blake2s_update_key - Hacl_Blake2s_128_blake2s_update_multi - Hacl_Blake2s_128_blake2s_update_last - Hacl_Blake2s_128_blake2s_finish - Hacl_Blake2s_128_blake2s - Hacl_Blake2s_128_store_state128s_to_state32 - Hacl_Blake2s_128_load_state128s_from_state32 - Hacl_Impl_SHA3_rotl - Hacl_Impl_SHA3_state_permute - Hacl_Impl_SHA3_loadState - Hacl_Impl_SHA3_storeState - Hacl_Impl_SHA3_absorb - Hacl_Impl_SHA3_squeeze - Hacl_Impl_SHA3_keccak - Hacl_SHA3_shake128_hacl - Hacl_SHA3_shake256_hacl - Hacl_SHA3_sha3_224 - Hacl_SHA3_sha3_256 - Hacl_SHA3_sha3_384 - Hacl_SHA3_sha3_512 - Hacl_Hash_Core_MD5_legacy_init - Hacl_Hash_Core_MD5_legacy_update - Hacl_Hash_Core_MD5_legacy_finish - Hacl_Hash_MD5_legacy_update_multi - Hacl_Hash_MD5_legacy_update_last - Hacl_Hash_MD5_legacy_hash - Hacl_Hash_Core_SHA1_legacy_init - Hacl_Hash_Core_SHA1_legacy_update - Hacl_Hash_Core_SHA1_legacy_finish - Hacl_Hash_SHA1_legacy_update_multi - Hacl_Hash_SHA1_legacy_update_last - Hacl_Hash_SHA1_legacy_hash - Hacl_Hash_Core_SHA2_init_224 - Hacl_Hash_Core_SHA2_init_256 - Hacl_Hash_Core_SHA2_init_384 - Hacl_Hash_Core_SHA2_init_512 - Hacl_Hash_Core_SHA2_update_384 - Hacl_Hash_Core_SHA2_update_512 - Hacl_Hash_Core_SHA2_pad_256 - Hacl_Hash_Core_SHA2_finish_224 - Hacl_Hash_Core_SHA2_finish_256 - Hacl_Hash_Core_SHA2_finish_384 - Hacl_Hash_Core_SHA2_finish_512 - Hacl_Hash_SHA2_update_multi_224 - Hacl_Hash_SHA2_update_multi_256 - Hacl_Hash_SHA2_update_multi_384 - Hacl_Hash_SHA2_update_multi_512 - Hacl_Hash_SHA2_update_last_224 - Hacl_Hash_SHA2_update_last_256 - Hacl_Hash_SHA2_update_last_384 - Hacl_Hash_SHA2_update_last_512 - Hacl_Hash_SHA2_hash_224 - Hacl_Hash_SHA2_hash_256 - Hacl_Hash_SHA2_hash_384 - Hacl_Hash_SHA2_hash_512 - EverCrypt_AutoConfig2_has_shaext - EverCrypt_AutoConfig2_has_aesni - EverCrypt_AutoConfig2_has_pclmulqdq - EverCrypt_AutoConfig2_has_avx2 - EverCrypt_AutoConfig2_has_avx - EverCrypt_AutoConfig2_has_bmi2 - EverCrypt_AutoConfig2_has_adx - EverCrypt_AutoConfig2_has_sse - EverCrypt_AutoConfig2_has_movbe - EverCrypt_AutoConfig2_has_rdrand - EverCrypt_AutoConfig2_has_avx512 - EverCrypt_AutoConfig2_recall - EverCrypt_AutoConfig2_init - EverCrypt_AutoConfig2_disable_avx2 - EverCrypt_AutoConfig2_disable_avx - EverCrypt_AutoConfig2_disable_bmi2 - EverCrypt_AutoConfig2_disable_adx - EverCrypt_AutoConfig2_disable_shaext - EverCrypt_AutoConfig2_disable_aesni - EverCrypt_AutoConfig2_disable_pclmulqdq - EverCrypt_AutoConfig2_disable_sse - EverCrypt_AutoConfig2_disable_movbe - EverCrypt_AutoConfig2_disable_rdrand - EverCrypt_AutoConfig2_disable_avx512 - 
EverCrypt_AutoConfig2_has_vec128 - EverCrypt_AutoConfig2_has_vec256 - EverCrypt_Hash_string_of_alg - EverCrypt_Hash_uu___is_MD5_s - EverCrypt_Hash_uu___is_SHA1_s - EverCrypt_Hash_uu___is_SHA2_224_s - EverCrypt_Hash_uu___is_SHA2_256_s - EverCrypt_Hash_uu___is_SHA2_384_s - EverCrypt_Hash_uu___is_SHA2_512_s - EverCrypt_Hash_uu___is_SHA3_256_s - EverCrypt_Hash_uu___is_Blake2S_s - EverCrypt_Hash_uu___is_Blake2S_128_s - EverCrypt_Hash_uu___is_Blake2B_s - EverCrypt_Hash_uu___is_Blake2B_256_s - EverCrypt_Hash_alg_of_state - EverCrypt_Hash_create_in - EverCrypt_Hash_create - EverCrypt_Hash_init - EverCrypt_Hash_update_multi_256 - EverCrypt_Hash_update2 - EverCrypt_Hash_update - EverCrypt_Hash_update_multi2 - EverCrypt_Hash_update_multi - EverCrypt_Hash_update_last_256 - EverCrypt_Hash_update_last2 - EverCrypt_Hash_update_last - EverCrypt_Hash_finish - EverCrypt_Hash_free - EverCrypt_Hash_copy - EverCrypt_Hash_hash_256 - EverCrypt_Hash_hash_224 - EverCrypt_Hash_hash - EverCrypt_Hash_Incremental_hash_len - EverCrypt_Hash_Incremental_block_len - EverCrypt_Hash_Incremental_create_in - EverCrypt_Hash_Incremental_init - EverCrypt_Hash_Incremental_max_input_len64 - EverCrypt_Hash_Incremental_update - EverCrypt_Hash_Incremental_finish_md5 - EverCrypt_Hash_Incremental_finish_sha1 - EverCrypt_Hash_Incremental_finish_sha224 - EverCrypt_Hash_Incremental_finish_sha256 - EverCrypt_Hash_Incremental_finish_sha3_256 - EverCrypt_Hash_Incremental_finish_sha384 - EverCrypt_Hash_Incremental_finish_sha512 - EverCrypt_Hash_Incremental_finish_blake2s - EverCrypt_Hash_Incremental_finish_blake2b - EverCrypt_Hash_Incremental_alg_of_state - EverCrypt_Hash_Incremental_finish - EverCrypt_Hash_Incremental_free - Hacl_Impl_Chacha20_chacha20_init - Hacl_Impl_Chacha20_chacha20_update - Hacl_Chacha20_chacha20_encrypt - Hacl_Chacha20_chacha20_decrypt - Hacl_Salsa20_salsa20_encrypt - Hacl_Salsa20_salsa20_decrypt - Hacl_Salsa20_salsa20_key_block0 - Hacl_Salsa20_hsalsa20 - Hacl_Bignum_Convert_bn_from_bytes_be_uint64 - Hacl_Bignum_Convert_bn_to_bytes_be_uint64 - Hacl_Bignum_Base_mul_wide_add_u64 - Hacl_Bignum_Base_mul_wide_add2_u32 - Hacl_Bignum_Base_mul_wide_add2_u64 - Hacl_Bignum_Lib_bn_get_top_index_u32 - Hacl_Bignum_Lib_bn_get_top_index_u64 - Hacl_Bignum_Addition_bn_sub_eq_len_u32 - Hacl_Bignum_Addition_bn_sub_eq_len_u64 - Hacl_Bignum_Addition_bn_add_eq_len_u32 - Hacl_Bignum_Addition_bn_add_eq_len_u64 - Hacl_Bignum_Multiplication_bn_mul_u32 - Hacl_Bignum_Multiplication_bn_mul_u64 - Hacl_Bignum_Multiplication_bn_sqr_u32 - Hacl_Bignum_Multiplication_bn_sqr_u64 - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32 - Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64 - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32 - Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64 - Hacl_Bignum_bn_add_mod_n_u32 - Hacl_Bignum_bn_add_mod_n_u64 - Hacl_Bignum_bn_sub_mod_n_u32 - Hacl_Bignum_bn_sub_mod_n_u64 - Hacl_Bignum_ModInvLimb_mod_inv_uint32 - Hacl_Bignum_ModInvLimb_mod_inv_uint64 - Hacl_Bignum_Montgomery_bn_check_modulus_u32 - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32 - Hacl_Bignum_Montgomery_bn_mont_reduction_u32 - Hacl_Bignum_Montgomery_bn_to_mont_u32 - Hacl_Bignum_Montgomery_bn_from_mont_u32 - Hacl_Bignum_Montgomery_bn_mont_mul_u32 - Hacl_Bignum_Montgomery_bn_mont_sqr_u32 - Hacl_Bignum_Montgomery_bn_check_modulus_u64 - Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64 - Hacl_Bignum_Montgomery_bn_mont_reduction_u64 - Hacl_Bignum_Montgomery_bn_to_mont_u64 - Hacl_Bignum_Montgomery_bn_from_mont_u64 - Hacl_Bignum_Montgomery_bn_mont_mul_u64 - 
Hacl_Bignum_Montgomery_bn_mont_sqr_u64 - Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32 - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32 - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32 - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32 - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32 - Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64 - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64 - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64 - Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64 - Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64 - Hacl_Curve25519_64_Slow_scalarmult - Hacl_Curve25519_64_Slow_secret_to_public - Hacl_Curve25519_64_Slow_ecdh - Hacl_Curve25519_64_scalarmult - Hacl_Curve25519_64_secret_to_public - Hacl_Curve25519_64_ecdh - Hacl_Impl_Curve25519_Field51_fadd - Hacl_Impl_Curve25519_Field51_fsub - Hacl_Impl_Curve25519_Field51_fmul - Hacl_Impl_Curve25519_Field51_fmul2 - Hacl_Impl_Curve25519_Field51_fmul1 - Hacl_Impl_Curve25519_Field51_fsqr - Hacl_Impl_Curve25519_Field51_fsqr2 - Hacl_Impl_Curve25519_Field51_store_felem - Hacl_Impl_Curve25519_Field51_cswap2 - Hacl_Curve25519_51_fsquare_times - Hacl_Curve25519_51_finv - Hacl_Curve25519_51_scalarmult - Hacl_Curve25519_51_secret_to_public - Hacl_Curve25519_51_ecdh - Hacl_Streaming_SHA2_create_in_224 - LowStar_BufferOps_op_Bang_Star__Hacl_Streaming_Functor_state_s__uint32_t____ - Hacl_Streaming_SHA2_init_224 - LowStar_BufferOps_op_Star_Equals__Hacl_Streaming_Functor_state_s__uint32_t____ - Hacl_Streaming_SHA2_update_224 - Hacl_Streaming_SHA2_finish_224 - Hacl_Streaming_SHA2_free_224 - Hacl_Streaming_SHA2_create_in_256 - Hacl_Streaming_SHA2_init_256 - Hacl_Streaming_SHA2_update_256 - Hacl_Streaming_SHA2_finish_256 - Hacl_Streaming_SHA2_free_256 - Hacl_Streaming_SHA2_create_in_384 - LowStar_BufferOps_op_Bang_Star__Hacl_Streaming_Functor_state_s__uint64_t____ - Hacl_Streaming_SHA2_init_384 - LowStar_BufferOps_op_Star_Equals__Hacl_Streaming_Functor_state_s__uint64_t____ - Hacl_Streaming_SHA2_update_384 - Hacl_Streaming_SHA2_finish_384 - Hacl_Streaming_SHA2_free_384 - Hacl_Streaming_SHA2_create_in_512 - Hacl_Streaming_SHA2_init_512 - Hacl_Streaming_SHA2_update_512 - Hacl_Streaming_SHA2_finish_512 - Hacl_Streaming_SHA2_free_512 - Hacl_Bignum25519_reduce_513 - Hacl_Bignum25519_inverse - Hacl_Bignum25519_load_51 - Hacl_Bignum25519_store_51 - Hacl_Impl_Ed25519_PointDouble_point_double - Hacl_Impl_Ed25519_PointDecompress_point_decompress - Hacl_Impl_Ed25519_PointCompress_point_compress - Hacl_Impl_Ed25519_PointEqual_point_equal - Hacl_Impl_Ed25519_PointAdd_point_add - Hacl_Impl_Ed25519_PointNegate_point_negate - Hacl_Impl_Ed25519_Ladder_make_point_inf - Hacl_Impl_Ed25519_Ladder_point_mul - Hacl_Ed25519_secret_to_public - Hacl_Ed25519_expand_keys - Hacl_Ed25519_sign_expanded - Hacl_Ed25519_sign - Hacl_Ed25519_verify - Hacl_Poly1305_32_poly1305_init - Hacl_Poly1305_32_poly1305_update1 - Hacl_Poly1305_32_poly1305_update - Hacl_Poly1305_32_poly1305_finish - Hacl_Poly1305_32_poly1305_mac - Hacl_Impl_Poly1305_Field32xN_128_load_acc2 - Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize - Hacl_Poly1305_128_poly1305_init - Hacl_Poly1305_128_poly1305_update1 - Hacl_Poly1305_128_poly1305_update - Hacl_Poly1305_128_poly1305_finish - Hacl_Poly1305_128_poly1305_mac - Hacl_Impl_Poly1305_Field32xN_256_load_acc4 - Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize - Hacl_Poly1305_256_poly1305_init - Hacl_Poly1305_256_poly1305_update1 - Hacl_Poly1305_256_poly1305_update - Hacl_Poly1305_256_poly1305_finish - 
Hacl_Poly1305_256_poly1305_mac - Hacl_NaCl_crypto_secretbox_detached - Hacl_NaCl_crypto_secretbox_open_detached - Hacl_NaCl_crypto_secretbox_easy - Hacl_NaCl_crypto_secretbox_open_easy - Hacl_NaCl_crypto_box_beforenm - Hacl_NaCl_crypto_box_detached_afternm - Hacl_NaCl_crypto_box_detached - Hacl_NaCl_crypto_box_open_detached_afternm - Hacl_NaCl_crypto_box_open_detached - Hacl_NaCl_crypto_box_easy_afternm - Hacl_NaCl_crypto_box_easy - Hacl_NaCl_crypto_box_open_easy_afternm - Hacl_NaCl_crypto_box_open_easy - Hacl_Impl_P256_LowLevel_toUint8 - Hacl_Impl_P256_LowLevel_changeEndian - Hacl_Impl_P256_LowLevel_toUint64ChangeEndian - Hacl_Impl_P256_Core_isPointAtInfinityPrivate - Hacl_Impl_P256_Core_secretToPublic - Hacl_Impl_P256_DH__ecp256dh_r - Hacl_P256_ecdsa_sign_p256_sha2 - Hacl_P256_ecdsa_sign_p256_sha384 - Hacl_P256_ecdsa_sign_p256_sha512 - Hacl_P256_ecdsa_sign_p256_without_hash - Hacl_P256_ecdsa_verif_p256_sha2 - Hacl_P256_ecdsa_verif_p256_sha384 - Hacl_P256_ecdsa_verif_p256_sha512 - Hacl_P256_ecdsa_verif_without_hash - Hacl_P256_validate_public_key - Hacl_P256_validate_private_key - Hacl_P256_uncompressed_to_raw - Hacl_P256_compressed_to_raw - Hacl_P256_raw_to_uncompressed - Hacl_P256_raw_to_compressed - Hacl_P256_dh_initiator - Hacl_P256_dh_responder - Hacl_K256_Field_is_felem_zero_vartime - Hacl_K256_Field_is_felem_eq_vartime - Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime - Hacl_K256_Field_load_felem - Hacl_K256_Field_load_felem_vartime - Hacl_K256_Field_store_felem - Hacl_K256_Field_fmul_small_num - Hacl_K256_Field_fadd - Hacl_K256_Field_fsub - Hacl_K256_Field_fmul - Hacl_K256_Field_fsqr - Hacl_K256_Field_fnormalize_weak - Hacl_K256_Field_fnormalize - Hacl_K256_Field_fnegate_conditional_vartime - Hacl_Impl_K256_Finv_fsquare_times_in_place - Hacl_Impl_K256_Finv_fsquare_times - Hacl_Impl_K256_Finv_fexp_223_23 - Hacl_Impl_K256_Finv_finv - Hacl_Impl_K256_Finv_fsqrt - Hacl_Impl_K256_Point_aff_point_decompress_vartime - Hacl_Impl_K256_Point_aff_point_compress_vartime - Hacl_Impl_K256_Point_point_negate - Hacl_Impl_K256_Point_point_eq - Hacl_Impl_K256_PointDouble_point_double - Hacl_Impl_K256_PointAdd_point_add - Hacl_Impl_K256_PointMul_make_point_at_inf - Hacl_Impl_K256_PointMul_point_mul - Hacl_K256_ECDSA_ecdsa_sign_hashed_msg - Hacl_K256_ECDSA_ecdsa_sign_sha256 - Hacl_K256_ECDSA_ecdsa_verify_hashed_msg - Hacl_K256_ECDSA_ecdsa_verify_sha256 - Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize - Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized - Hacl_K256_ECDSA_secp256k1_ecdsa_sign_hashed_msg - Hacl_K256_ECDSA_secp256k1_ecdsa_sign_sha256 - Hacl_K256_ECDSA_secp256k1_ecdsa_verify_hashed_msg - Hacl_K256_ECDSA_secp256k1_ecdsa_verify_sha256 - Hacl_K256_ECDSA_public_key_uncompressed_to_raw - Hacl_K256_ECDSA_public_key_uncompressed_from_raw - Hacl_K256_ECDSA_public_key_compressed_to_raw - Hacl_K256_ECDSA_public_key_compressed_from_raw - Hacl_Keccak_shake128_4x - Hacl_Impl_Matrix_mod_pow2 - Hacl_Impl_Matrix_matrix_add - Hacl_Impl_Matrix_matrix_sub - Hacl_Impl_Matrix_matrix_mul - Hacl_Impl_Matrix_matrix_mul_s - Hacl_Impl_Matrix_matrix_eq - Hacl_Impl_Matrix_matrix_to_lbytes - Hacl_Impl_Matrix_matrix_from_lbytes - Hacl_Impl_Frodo_Gen_frodo_gen_matrix_shake_4x - Hacl_Impl_Frodo_Params_frodo_gen_matrix - Hacl_Impl_Frodo_Sample_frodo_sample_matrix64 - Hacl_Impl_Frodo_Sample_frodo_sample_matrix640 - Hacl_Impl_Frodo_Sample_frodo_sample_matrix976 - Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344 - randombytes_ - Hacl_Impl_Frodo_Pack_frodo_pack - Hacl_Impl_Frodo_Pack_frodo_unpack - 
Hacl_Impl_Frodo_Encode_frodo_key_encode - Hacl_Impl_Frodo_Encode_frodo_key_decode - Hacl_IntTypes_Intrinsics_add_carry_u32 - Hacl_IntTypes_Intrinsics_sub_borrow_u32 - Hacl_IntTypes_Intrinsics_add_carry_u64 - Hacl_IntTypes_Intrinsics_sub_borrow_u64 - Hacl_IntTypes_Intrinsics_128_add_carry_u64 - Hacl_IntTypes_Intrinsics_128_sub_borrow_u64 - Hacl_RSAPSS_rsapss_sign - Hacl_RSAPSS_rsapss_verify - Hacl_RSAPSS_new_rsapss_load_pkey - Hacl_RSAPSS_new_rsapss_load_skey - Hacl_RSAPSS_rsapss_skey_sign - Hacl_RSAPSS_rsapss_pkey_verify - Hacl_FFDHE_ffdhe_len - Hacl_FFDHE_new_ffdhe_precomp_p - Hacl_FFDHE_ffdhe_secret_to_public_precomp - Hacl_FFDHE_ffdhe_secret_to_public - Hacl_FFDHE_ffdhe_shared_secret_precomp - Hacl_FFDHE_ffdhe_shared_secret - Hacl_Streaming_Blake2_blocks_state_len - Hacl_Streaming_Blake2_blake2s_32_no_key_create_in - Hacl_Streaming_Blake2_blake2s_32_no_key_init - Hacl_Streaming_Blake2_blake2s_32_no_key_update - Hacl_Streaming_Blake2_blake2s_32_no_key_finish - Hacl_Streaming_Blake2_blake2s_32_no_key_free - Hacl_Streaming_Blake2_blake2b_32_no_key_create_in - Hacl_Streaming_Blake2_blake2b_32_no_key_init - Hacl_Streaming_Blake2_blake2b_32_no_key_update - Hacl_Streaming_Blake2_blake2b_32_no_key_finish - Hacl_Streaming_Blake2_blake2b_32_no_key_free - Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in - Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init - Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update - Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish - Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free - Hacl_Streaming_SHA3_create_in_256 - Hacl_Streaming_SHA3_init_256 - Hacl_Streaming_SHA3_update_256 - Hacl_Streaming_SHA3_finish_256 - Hacl_Streaming_SHA3_free_256 - Hacl_Frodo640_crypto_kem_keypair - Hacl_Frodo640_crypto_kem_enc - Hacl_Frodo640_crypto_kem_dec - Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in - Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init - Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update - Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish - Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free - Hacl_Chacha20_Vec128_chacha20_encrypt_128 - Hacl_Chacha20_Vec128_chacha20_decrypt_128 - Hacl_Chacha20Poly1305_128_aead_encrypt - Hacl_Chacha20Poly1305_128_aead_decrypt - Hacl_HMAC_legacy_compute_sha1 - Hacl_HMAC_compute_sha2_256 - Hacl_HMAC_compute_sha2_384 - Hacl_HMAC_compute_sha2_512 - Hacl_HMAC_compute_blake2s_32 - Hacl_HMAC_compute_blake2b_32 - Hacl_HKDF_expand_sha2_256 - Hacl_HKDF_extract_sha2_256 - Hacl_HKDF_expand_sha2_512 - Hacl_HKDF_extract_sha2_512 - Hacl_HKDF_expand_blake2s_32 - Hacl_HKDF_extract_blake2s_32 - Hacl_HKDF_expand_blake2b_32 - Hacl_HKDF_extract_blake2b_32 - Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS - Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR - Hacl_HPKE_Curve51_CP128_SHA512_sealBase - Hacl_HPKE_Curve51_CP128_SHA512_openBase - EverCrypt_Cipher_chacha20 - Hacl_GenericField32_field_modulus_check - Hacl_GenericField32_field_init - LowStar_BufferOps_op_Bang_Star__Hacl_Bignum_MontArithmetic_bn_mont_ctx___uint32_t__uint32_t - Hacl_GenericField32_field_free - Hacl_GenericField32_field_get_len - Hacl_GenericField32_to_field - Hacl_GenericField32_from_field - Hacl_GenericField32_add - Hacl_GenericField32_sub - Hacl_GenericField32_mul - Hacl_GenericField32_sqr - Hacl_GenericField32_one - Hacl_GenericField32_exp_consttime - Hacl_GenericField32_exp_vartime - Hacl_GenericField32_inverse - Hacl_Bignum256_add - Hacl_Bignum256_sub - Hacl_Bignum256_add_mod - Hacl_Bignum256_sub_mod - Hacl_Bignum256_mul - Hacl_Bignum256_sqr - Hacl_Bignum256_mod - 
Hacl_Bignum256_mod_exp_vartime - Hacl_Bignum256_mod_exp_consttime - Hacl_Bignum256_mod_inv_prime_vartime - Hacl_Bignum256_mont_ctx_init - LowStar_BufferOps_op_Bang_Star__Hacl_Bignum_MontArithmetic_bn_mont_ctx___uint64_t__uint64_t - Hacl_Bignum256_mont_ctx_free - Hacl_Bignum256_mod_precomp - Hacl_Bignum256_mod_exp_vartime_precomp - Hacl_Bignum256_mod_exp_consttime_precomp - Hacl_Bignum256_mod_inv_prime_vartime_precomp - Hacl_Bignum256_new_bn_from_bytes_be - Hacl_Bignum256_new_bn_from_bytes_le - Hacl_Bignum256_bn_to_bytes_be - Hacl_Bignum256_bn_to_bytes_le - Hacl_Bignum256_lt_mask - Hacl_Bignum256_eq_mask - Hacl_SHA2_Vec256_sha224_8 - Hacl_SHA2_Vec256_sha256_8 - Hacl_SHA2_Vec256_sha384_4 - Hacl_SHA2_Vec256_sha512_4 - Hacl_EC_K256_mk_felem_zero - Hacl_EC_K256_mk_felem_one - Hacl_EC_K256_felem_add - Hacl_EC_K256_felem_sub - Hacl_EC_K256_felem_mul - Hacl_EC_K256_felem_sqr - Hacl_EC_K256_felem_inv - Hacl_EC_K256_felem_load - Hacl_EC_K256_felem_store - Hacl_EC_K256_mk_point_at_inf - Hacl_EC_K256_mk_base_point - Hacl_EC_K256_point_negate - Hacl_EC_K256_point_add - Hacl_EC_K256_point_double - Hacl_EC_K256_point_mul - Hacl_EC_K256_point_eq - Hacl_EC_K256_point_compress - Hacl_EC_K256_point_decompress - Hacl_Bignum4096_add - Hacl_Bignum4096_sub - Hacl_Bignum4096_add_mod - Hacl_Bignum4096_sub_mod - Hacl_Bignum4096_mul - Hacl_Bignum4096_sqr - Hacl_Bignum4096_mod - Hacl_Bignum4096_mod_exp_vartime - Hacl_Bignum4096_mod_exp_consttime - Hacl_Bignum4096_mod_inv_prime_vartime - Hacl_Bignum4096_mont_ctx_init - Hacl_Bignum4096_mont_ctx_free - Hacl_Bignum4096_mod_precomp - Hacl_Bignum4096_mod_exp_vartime_precomp - Hacl_Bignum4096_mod_exp_consttime_precomp - Hacl_Bignum4096_mod_inv_prime_vartime_precomp - Hacl_Bignum4096_new_bn_from_bytes_be - Hacl_Bignum4096_new_bn_from_bytes_le - Hacl_Bignum4096_bn_to_bytes_be - Hacl_Bignum4096_bn_to_bytes_le - Hacl_Bignum4096_lt_mask - Hacl_Bignum4096_eq_mask - Hacl_Chacha20_Vec32_chacha20_encrypt_32 - Hacl_Chacha20_Vec32_chacha20_decrypt_32 - EverCrypt_Ed25519_secret_to_public - EverCrypt_Ed25519_expand_keys - EverCrypt_Ed25519_sign_expanded - EverCrypt_Ed25519_sign - EverCrypt_Ed25519_verify - Hacl_Bignum4096_32_add - Hacl_Bignum4096_32_sub - Hacl_Bignum4096_32_add_mod - Hacl_Bignum4096_32_sub_mod - Hacl_Bignum4096_32_mul - Hacl_Bignum4096_32_sqr - Hacl_Bignum4096_32_mod - Hacl_Bignum4096_32_mod_exp_vartime - Hacl_Bignum4096_32_mod_exp_consttime - Hacl_Bignum4096_32_mod_inv_prime_vartime - Hacl_Bignum4096_32_mont_ctx_init - Hacl_Bignum4096_32_mont_ctx_free - Hacl_Bignum4096_32_mod_precomp - Hacl_Bignum4096_32_mod_exp_vartime_precomp - Hacl_Bignum4096_32_mod_exp_consttime_precomp - Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp - Hacl_Bignum4096_32_new_bn_from_bytes_be - Hacl_Bignum4096_32_new_bn_from_bytes_le - Hacl_Bignum4096_32_bn_to_bytes_be - Hacl_Bignum4096_32_bn_to_bytes_le - Hacl_Bignum4096_32_lt_mask - Hacl_Bignum4096_32_eq_mask - EverCrypt_HMAC_compute_sha1 - EverCrypt_HMAC_compute_sha2_256 - EverCrypt_HMAC_compute_sha2_384 - EverCrypt_HMAC_compute_sha2_512 - EverCrypt_HMAC_compute_blake2s - EverCrypt_HMAC_compute_blake2b - EverCrypt_HMAC_is_supported_alg - EverCrypt_HMAC_compute - Hacl_HMAC_DRBG_min_length - Hacl_HMAC_DRBG_uu___is_State - Hacl_HMAC_DRBG_create_in - Hacl_HMAC_DRBG_instantiate - Hacl_HMAC_DRBG_reseed - Hacl_HMAC_DRBG_generate - EverCrypt_DRBG_min_length - EverCrypt_DRBG_uu___is_SHA1_s - EverCrypt_DRBG_uu___is_SHA2_256_s - EverCrypt_DRBG_uu___is_SHA2_384_s - EverCrypt_DRBG_uu___is_SHA2_512_s - EverCrypt_DRBG_create - EverCrypt_DRBG_instantiate_sha1 
- EverCrypt_DRBG_instantiate_sha2_256 - EverCrypt_DRBG_instantiate_sha2_384 - EverCrypt_DRBG_instantiate_sha2_512 - EverCrypt_DRBG_reseed_sha1 - EverCrypt_DRBG_reseed_sha2_256 - EverCrypt_DRBG_reseed_sha2_384 - EverCrypt_DRBG_reseed_sha2_512 - EverCrypt_DRBG_generate_sha1 - EverCrypt_DRBG_generate_sha2_256 - EverCrypt_DRBG_generate_sha2_384 - EverCrypt_DRBG_generate_sha2_512 - EverCrypt_DRBG_uninstantiate_sha1 - EverCrypt_DRBG_uninstantiate_sha2_256 - EverCrypt_DRBG_uninstantiate_sha2_384 - EverCrypt_DRBG_uninstantiate_sha2_512 - EverCrypt_DRBG_instantiate - EverCrypt_DRBG_reseed - EverCrypt_DRBG_generate - EverCrypt_DRBG_uninstantiate - Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS - Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR - Hacl_HPKE_Curve64_CP128_SHA512_sealBase - Hacl_HPKE_Curve64_CP128_SHA512_openBase - Hacl_HPKE_P256_CP128_SHA256_setupBaseS - Hacl_HPKE_P256_CP128_SHA256_setupBaseR - Hacl_HPKE_P256_CP128_SHA256_sealBase - Hacl_HPKE_P256_CP128_SHA256_openBase - EverCrypt_Curve25519_secret_to_public - EverCrypt_Curve25519_scalarmult - EverCrypt_Curve25519_ecdh - Hacl_Chacha20_Vec256_chacha20_encrypt_256 - Hacl_Chacha20_Vec256_chacha20_decrypt_256 - Hacl_Chacha20Poly1305_256_aead_encrypt - Hacl_Chacha20Poly1305_256_aead_decrypt - Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS - Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR - Hacl_HPKE_Curve51_CP256_SHA512_sealBase - Hacl_HPKE_Curve51_CP256_SHA512_openBase - Hacl_SHA2_Scalar32_sha224 - Hacl_SHA2_Scalar32_sha256 - Hacl_SHA2_Scalar32_sha384 - Hacl_SHA2_Scalar32_sha512 - Hacl_Frodo976_crypto_kem_keypair - Hacl_Frodo976_crypto_kem_enc - Hacl_Frodo976_crypto_kem_dec - Hacl_HMAC_Blake2s_128_compute_blake2s_128 - Hacl_HKDF_Blake2s_128_expand_blake2s_128 - Hacl_HKDF_Blake2s_128_extract_blake2s_128 - Hacl_GenericField64_field_modulus_check - Hacl_GenericField64_field_init - Hacl_GenericField64_field_free - Hacl_GenericField64_field_get_len - Hacl_GenericField64_to_field - Hacl_GenericField64_from_field - Hacl_GenericField64_add - Hacl_GenericField64_sub - Hacl_GenericField64_mul - Hacl_GenericField64_sqr - Hacl_GenericField64_one - Hacl_GenericField64_exp_consttime - Hacl_GenericField64_exp_vartime - Hacl_GenericField64_inverse - Hacl_Streaming_Poly1305_256_create_in - Hacl_Streaming_Poly1305_256_init - Hacl_Streaming_Poly1305_256_update - Hacl_Streaming_Poly1305_256_finish - Hacl_Streaming_Poly1305_256_free - Hacl_Frodo1344_crypto_kem_keypair - Hacl_Frodo1344_crypto_kem_enc - Hacl_Frodo1344_crypto_kem_dec - Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS - Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR - Hacl_HPKE_Curve64_CP256_SHA512_sealBase - Hacl_HPKE_Curve64_CP256_SHA512_openBase - Hacl_Streaming_Poly1305_128_create_in - Hacl_Streaming_Poly1305_128_init - Hacl_Streaming_Poly1305_128_update - Hacl_Streaming_Poly1305_128_finish - Hacl_Streaming_Poly1305_128_free - Hacl_Bignum32_add - Hacl_Bignum32_sub - Hacl_Bignum32_add_mod - Hacl_Bignum32_sub_mod - Hacl_Bignum32_mul - Hacl_Bignum32_sqr - Hacl_Bignum32_mod - Hacl_Bignum32_mod_exp_vartime - Hacl_Bignum32_mod_exp_consttime - Hacl_Bignum32_mod_inv_prime_vartime - Hacl_Bignum32_mont_ctx_init - Hacl_Bignum32_mont_ctx_free - Hacl_Bignum32_mod_precomp - Hacl_Bignum32_mod_exp_vartime_precomp - Hacl_Bignum32_mod_exp_consttime_precomp - Hacl_Bignum32_mod_inv_prime_vartime_precomp - Hacl_Bignum32_new_bn_from_bytes_be - Hacl_Bignum32_new_bn_from_bytes_le - Hacl_Bignum32_bn_to_bytes_be - Hacl_Bignum32_bn_to_bytes_le - Hacl_Bignum32_lt_mask - Hacl_Bignum32_eq_mask - Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS - 
Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR - Hacl_HPKE_Curve51_CP128_SHA256_sealBase - Hacl_HPKE_Curve51_CP128_SHA256_openBase - Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS - Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR - Hacl_HPKE_Curve64_CP128_SHA256_sealBase - Hacl_HPKE_Curve64_CP128_SHA256_openBase - Hacl_Bignum256_32_add - Hacl_Bignum256_32_sub - Hacl_Bignum256_32_add_mod - Hacl_Bignum256_32_sub_mod - Hacl_Bignum256_32_mul - Hacl_Bignum256_32_sqr - Hacl_Bignum256_32_mod - Hacl_Bignum256_32_mod_exp_vartime - Hacl_Bignum256_32_mod_exp_consttime - Hacl_Bignum256_32_mod_inv_prime_vartime - Hacl_Bignum256_32_mont_ctx_init - Hacl_Bignum256_32_mont_ctx_free - Hacl_Bignum256_32_mod_precomp - Hacl_Bignum256_32_mod_exp_vartime_precomp - Hacl_Bignum256_32_mod_exp_consttime_precomp - Hacl_Bignum256_32_mod_inv_prime_vartime_precomp - Hacl_Bignum256_32_new_bn_from_bytes_be - Hacl_Bignum256_32_new_bn_from_bytes_le - Hacl_Bignum256_32_bn_to_bytes_be - Hacl_Bignum256_32_bn_to_bytes_le - Hacl_Bignum256_32_lt_mask - Hacl_Bignum256_32_eq_mask - Hacl_SHA2_Vec128_sha224_4 - Hacl_SHA2_Vec128_sha256_4 - Hacl_Chacha20Poly1305_32_aead_encrypt - Hacl_Chacha20Poly1305_32_aead_decrypt - Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS - Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR - Hacl_HPKE_Curve51_CP32_SHA256_sealBase - Hacl_HPKE_Curve51_CP32_SHA256_openBase - Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS - Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR - Hacl_HPKE_Curve64_CP256_SHA256_sealBase - Hacl_HPKE_Curve64_CP256_SHA256_openBase - EverCrypt_Poly1305_poly1305 - Hacl_Streaming_Poly1305_32_create_in - Hacl_Streaming_Poly1305_32_init - Hacl_Streaming_Poly1305_32_update - Hacl_Streaming_Poly1305_32_finish - Hacl_Streaming_Poly1305_32_free - Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS - Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR - Hacl_HPKE_Curve51_CP32_SHA512_sealBase - Hacl_HPKE_Curve51_CP32_SHA512_openBase - Hacl_HPKE_P256_CP256_SHA256_setupBaseS - Hacl_HPKE_P256_CP256_SHA256_setupBaseR - Hacl_HPKE_P256_CP256_SHA256_sealBase - Hacl_HPKE_P256_CP256_SHA256_openBase - Hacl_HPKE_P256_CP32_SHA256_setupBaseS - Hacl_HPKE_P256_CP32_SHA256_setupBaseR - Hacl_HPKE_P256_CP32_SHA256_sealBase - Hacl_HPKE_P256_CP32_SHA256_openBase - Hacl_Bignum64_add - Hacl_Bignum64_sub - Hacl_Bignum64_add_mod - Hacl_Bignum64_sub_mod - Hacl_Bignum64_mul - Hacl_Bignum64_sqr - Hacl_Bignum64_mod - Hacl_Bignum64_mod_exp_vartime - Hacl_Bignum64_mod_exp_consttime - Hacl_Bignum64_mod_inv_prime_vartime - Hacl_Bignum64_mont_ctx_init - Hacl_Bignum64_mont_ctx_free - Hacl_Bignum64_mod_precomp - Hacl_Bignum64_mod_exp_vartime_precomp - Hacl_Bignum64_mod_exp_consttime_precomp - Hacl_Bignum64_mod_inv_prime_vartime_precomp - Hacl_Bignum64_new_bn_from_bytes_be - Hacl_Bignum64_new_bn_from_bytes_le - Hacl_Bignum64_bn_to_bytes_be - Hacl_Bignum64_bn_to_bytes_le - Hacl_Bignum64_lt_mask - Hacl_Bignum64_eq_mask - Hacl_Frodo64_crypto_kem_keypair - Hacl_Frodo64_crypto_kem_enc - Hacl_Frodo64_crypto_kem_dec - Hacl_Streaming_SHA1_legacy_create_in_sha1 - Hacl_Streaming_SHA1_legacy_init_sha1 - Hacl_Streaming_SHA1_legacy_update_sha1 - Hacl_Streaming_SHA1_legacy_finish_sha1 - Hacl_Streaming_SHA1_legacy_free_sha1 - Hacl_Streaming_MD5_legacy_create_in_md5 - Hacl_Streaming_MD5_legacy_init_md5 - Hacl_Streaming_MD5_legacy_update_md5 - Hacl_Streaming_MD5_legacy_finish_md5 - Hacl_Streaming_MD5_legacy_free_md5 - Hacl_HMAC_Blake2b_256_compute_blake2b_256 - Hacl_HKDF_Blake2b_256_expand_blake2b_256 - Hacl_HKDF_Blake2b_256_extract_blake2b_256 - Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS - 
Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR - Hacl_HPKE_Curve64_CP32_SHA256_sealBase - Hacl_HPKE_Curve64_CP32_SHA256_openBase - Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS - Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR - Hacl_HPKE_Curve64_CP32_SHA512_sealBase - Hacl_HPKE_Curve64_CP32_SHA512_openBase - EverCrypt_HKDF_expand_sha1 - EverCrypt_HKDF_extract_sha1 - EverCrypt_HKDF_expand_sha2_256 - EverCrypt_HKDF_extract_sha2_256 - EverCrypt_HKDF_expand_sha2_384 - EverCrypt_HKDF_extract_sha2_384 - EverCrypt_HKDF_expand_sha2_512 - EverCrypt_HKDF_extract_sha2_512 - EverCrypt_HKDF_expand_blake2s - EverCrypt_HKDF_extract_blake2s - EverCrypt_HKDF_expand_blake2b - EverCrypt_HKDF_extract_blake2b - EverCrypt_HKDF_expand - EverCrypt_HKDF_extract - Hacl_EC_Ed25519_mk_felem_zero - Hacl_EC_Ed25519_mk_felem_one - Hacl_EC_Ed25519_felem_add - Hacl_EC_Ed25519_felem_sub - Hacl_EC_Ed25519_felem_mul - Hacl_EC_Ed25519_felem_sqr - Hacl_EC_Ed25519_felem_inv - Hacl_EC_Ed25519_felem_load - Hacl_EC_Ed25519_felem_store - Hacl_EC_Ed25519_mk_point_at_inf - Hacl_EC_Ed25519_mk_base_point - Hacl_EC_Ed25519_point_negate - Hacl_EC_Ed25519_point_add - Hacl_EC_Ed25519_point_double - Hacl_EC_Ed25519_point_mul - Hacl_EC_Ed25519_point_eq - Hacl_EC_Ed25519_point_compress - Hacl_EC_Ed25519_point_decompress - Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS - Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR - Hacl_HPKE_Curve51_CP256_SHA256_sealBase - Hacl_HPKE_Curve51_CP256_SHA256_openBase - EverCrypt_Chacha20Poly1305_aead_encrypt - EverCrypt_Chacha20Poly1305_aead_decrypt - EverCrypt_AEAD_uu___is_Ek - EverCrypt_AEAD_alg_of_state - EverCrypt_AEAD_create_in - EverCrypt_AEAD_encrypt - EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check - EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check - EverCrypt_AEAD_encrypt_expand_aes128_gcm - EverCrypt_AEAD_encrypt_expand_aes256_gcm - EverCrypt_AEAD_encrypt_expand_chacha20_poly1305 - EverCrypt_AEAD_encrypt_expand - EverCrypt_AEAD_decrypt - EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check - EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check - EverCrypt_AEAD_decrypt_expand_aes128_gcm - EverCrypt_AEAD_decrypt_expand_aes256_gcm - EverCrypt_AEAD_decrypt_expand_chacha20_poly1305 - EverCrypt_AEAD_decrypt_expand - EverCrypt_AEAD_free diff --git a/dist/c89-compatible/libintvector.h b/dist/c89-compatible/libintvector.h deleted file mode 100644 index fe2ba5eb8a..0000000000 --- a/dist/c89-compatible/libintvector.h +++ /dev/null @@ -1,937 +0,0 @@ -#ifndef __Vec_Intrin_H -#define __Vec_Intrin_H - -#include - -/* We include config.h here to ensure that the various feature-flags are - * properly brought into scope. Users can either run the configure script, or - * write a config.h themselves and put it under version control. */ -#if defined(__has_include) -#if __has_include("config.h") -#include "config.h" -#endif -#endif - -/* # DEBUGGING: - * ============ - * It is possible to debug the current definitions by using libintvector_debug.h - * See the include at the bottom of the file. 
*/ - -#define Lib_IntVector_Intrinsics_bit_mask64(x) -((x) & 1) - -#if defined(__x86_64__) || defined(_M_X64) - -#if defined(HACL_CAN_COMPILE_VEC128) - -#include <emmintrin.h> -#include <tmmintrin.h> -#include <smmintrin.h> - -typedef __m128i Lib_IntVector_Intrinsics_vec128; - -#define Lib_IntVector_Intrinsics_ni_aes_enc(x0, x1) \ - (_mm_aesenc_si128(x0, x1)) - -#define Lib_IntVector_Intrinsics_ni_aes_enc_last(x0, x1) \ - (_mm_aesenclast_si128(x0, x1)) - -#define Lib_IntVector_Intrinsics_ni_aes_keygen_assist(x0, x1) \ - (_mm_aeskeygenassist_si128(x0, x1)) - -#define Lib_IntVector_Intrinsics_ni_clmul(x0, x1, x2) \ - (_mm_clmulepi64_si128(x0, x1, x2)) - - -#define Lib_IntVector_Intrinsics_vec128_xor(x0, x1) \ - (_mm_xor_si128(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_eq64(x0, x1) \ - (_mm_cmpeq_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_eq32(x0, x1) \ - (_mm_cmpeq_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_gt64(x0, x1) \ - (_mm_cmpgt_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_gt32(x0, x1) \ - (_mm_cmpgt_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_or(x0, x1) \ - (_mm_or_si128(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_and(x0, x1) \ - (_mm_and_si128(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_lognot(x0) \ - (_mm_xor_si128(x0, _mm_set1_epi32(-1))) - - -#define Lib_IntVector_Intrinsics_vec128_shift_left(x0, x1) \ - (_mm_slli_si128(x0, (x1)/8)) - -#define Lib_IntVector_Intrinsics_vec128_shift_right(x0, x1) \ - (_mm_srli_si128(x0, (x1)/8)) - -#define Lib_IntVector_Intrinsics_vec128_shift_left64(x0, x1) \ - (_mm_slli_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_shift_right64(x0, x1) \ - (_mm_srli_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_shift_left32(x0, x1) \ - (_mm_slli_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_shift_right32(x0, x1) \ - (_mm_srli_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) \ - (_mm_shuffle_epi8(x0, _mm_set_epi8(14,13,12,15,10,9,8,11,6,5,4,7,2,1,0,3))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) \ - (_mm_shuffle_epi8(x0, _mm_set_epi8(13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32_24(x0) \ - (_mm_shuffle_epi8(x0, _mm_set_epi8(12,15,14,13,8,11,10,9,4,7,6,5,0,3,2,1))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0,x1) \ - (((x1) == 8? Lib_IntVector_Intrinsics_vec128_rotate_left32_8(x0) : \ - ((x1) == 16? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : \ - ((x1) == 24?
Lib_IntVector_Intrinsics_vec128_rotate_left32_24(x0) : \ - _mm_xor_si128(_mm_slli_epi32(x0,x1),_mm_srli_epi32(x0,32-(x1))))))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0,x1) \ - (Lib_IntVector_Intrinsics_vec128_rotate_left32(x0,32-(x1))) - -#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \ - (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x4,x3,x2,x1))) - -#define Lib_IntVector_Intrinsics_vec128_shuffle64(x0, x1, x2) \ - (_mm_shuffle_epi32(x0, _MM_SHUFFLE(2*x1+1,2*x1,2*x2+1,2*x2))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(x0, x1) \ - (_mm_shuffle_epi32(x0, _MM_SHUFFLE((x1+3)%4,(x1+2)%4,(x1+1)%4,x1%4))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes64(x0, x1) \ - (_mm_shuffle_epi32(x0, _MM_SHUFFLE((2*x1+3)%4,(2*x1+2)%4,(2*x1+1)%4,(2*x1)%4))) - -#define Lib_IntVector_Intrinsics_vec128_load32_le(x0) \ - (_mm_loadu_si128((__m128i*)(x0))) - -#define Lib_IntVector_Intrinsics_vec128_load64_le(x0) \ - (_mm_loadu_si128((__m128i*)(x0))) - -#define Lib_IntVector_Intrinsics_vec128_store32_le(x0, x1) \ - (_mm_storeu_si128((__m128i*)(x0), x1)) - -#define Lib_IntVector_Intrinsics_vec128_store64_le(x0, x1) \ - (_mm_storeu_si128((__m128i*)(x0), x1)) - -#define Lib_IntVector_Intrinsics_vec128_load_be(x0) \ - (_mm_shuffle_epi8(_mm_loadu_si128((__m128i*)(x0)), _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))) - -#define Lib_IntVector_Intrinsics_vec128_load32_be(x0) \ - (_mm_shuffle_epi8(_mm_loadu_si128((__m128i*)(x0)), _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3))) - -#define Lib_IntVector_Intrinsics_vec128_load64_be(x0) \ - (_mm_shuffle_epi8(_mm_loadu_si128((__m128i*)(x0)), _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7))) - -#define Lib_IntVector_Intrinsics_vec128_store_be(x0, x1) \ - (_mm_storeu_si128((__m128i*)(x0), _mm_shuffle_epi8(x1, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)))) - - -#define Lib_IntVector_Intrinsics_vec128_store32_be(x0, x1) \ - (_mm_storeu_si128((__m128i*)(x0), _mm_shuffle_epi8(x1, _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3)))) - -#define Lib_IntVector_Intrinsics_vec128_store64_be(x0, x1) \ - (_mm_storeu_si128((__m128i*)(x0), _mm_shuffle_epi8(x1, _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7)))) - - - -#define Lib_IntVector_Intrinsics_vec128_insert8(x0, x1, x2) \ - (_mm_insert_epi8(x0, x1, x2)) - -#define Lib_IntVector_Intrinsics_vec128_insert32(x0, x1, x2) \ - (_mm_insert_epi32(x0, x1, x2)) - -#define Lib_IntVector_Intrinsics_vec128_insert64(x0, x1, x2) \ - (_mm_insert_epi64(x0, x1, x2)) - -#define Lib_IntVector_Intrinsics_vec128_extract8(x0, x1) \ - (_mm_extract_epi8(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_extract32(x0, x1) \ - (_mm_extract_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_extract64(x0, x1) \ - (_mm_extract_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_zero \ - (_mm_setzero_si128()) - - -#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \ - (_mm_add_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_sub64(x0, x1) \ - (_mm_sub_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_mul64(x0, x1) \ - (_mm_mul_epu32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_smul64(x0, x1) \ - (_mm_mul_epu32(x0, _mm_set1_epi64x(x1))) - -#define Lib_IntVector_Intrinsics_vec128_add32(x0, x1) \ - (_mm_add_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_sub32(x0, x1) \ - (_mm_sub_epi32(x0, x1)) - -#define 
Lib_IntVector_Intrinsics_vec128_mul32(x0, x1) \ - (_mm_mullo_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_smul32(x0, x1) \ - (_mm_mullo_epi32(x0, _mm_set1_epi32(x1))) - -#define Lib_IntVector_Intrinsics_vec128_load128(x) \ - ((__m128i)x) - -#define Lib_IntVector_Intrinsics_vec128_load64(x) \ - (_mm_set1_epi64x(x)) /* hi lo */ - -#define Lib_IntVector_Intrinsics_vec128_load64s(x0, x1) \ - (_mm_set_epi64x(x1, x0)) /* hi lo */ - -#define Lib_IntVector_Intrinsics_vec128_load32(x) \ - (_mm_set1_epi32(x)) - -#define Lib_IntVector_Intrinsics_vec128_load32s(x0, x1, x2, x3) \ - (_mm_set_epi32(x3, x2, x1, x0)) /* hi lo */ - -#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x1, x2) \ - (_mm_unpacklo_epi32(x1, x2)) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high32(x1, x2) \ - (_mm_unpackhi_epi32(x1, x2)) - -#define Lib_IntVector_Intrinsics_vec128_interleave_low64(x1, x2) \ - (_mm_unpacklo_epi64(x1, x2)) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x1, x2) \ - (_mm_unpackhi_epi64(x1, x2)) - -#endif /* HACL_CAN_COMPILE_VEC128 */ - -#if defined(HACL_CAN_COMPILE_VEC256) - -#include <immintrin.h> -#include <wmmintrin.h> - -typedef __m256i Lib_IntVector_Intrinsics_vec256; - - -#define Lib_IntVector_Intrinsics_vec256_eq64(x0, x1) \ - (_mm256_cmpeq_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_eq32(x0, x1) \ - (_mm256_cmpeq_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_gt64(x0, x1) \ - (_mm256_cmpgt_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_gt32(x0, x1) \ - (_mm256_cmpgt_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_xor(x0, x1) \ - (_mm256_xor_si256(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_or(x0, x1) \ - (_mm256_or_si256(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_and(x0, x1) \ - (_mm256_and_si256(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_lognot(x0) \ - (_mm256_xor_si256(x0, _mm256_set1_epi32(-1))) - -#define Lib_IntVector_Intrinsics_vec256_shift_left(x0, x1) \ - (_mm256_slli_si256(x0, (x1)/8)) - -#define Lib_IntVector_Intrinsics_vec256_shift_right(x0, x1) \ - (_mm256_srli_si256(x0, (x1)/8)) - -#define Lib_IntVector_Intrinsics_vec256_shift_left64(x0, x1) \ - (_mm256_slli_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_shift_right64(x0, x1) \ - (_mm256_srli_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_shift_left32(x0, x1) \ - (_mm256_slli_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_shift_right32(x0, x1) \ - (_mm256_srli_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(14,13,12,15,10,9,8,11,6,5,4,7,2,1,0,3,14,13,12,15,10,9,8,11,6,5,4,7,2,1,0,3))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_left32_24(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(12,15,14,13,8,11,10,9,4,7,6,5,0,3,2,1,12,15,14,13,8,11,10,9,4,7,6,5,0,3,2,1))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_left32(x0,x1) \ - ((x1 == 8? Lib_IntVector_Intrinsics_vec256_rotate_left32_8(x0) : \ - (x1 == 16? Lib_IntVector_Intrinsics_vec256_rotate_left32_16(x0) : \ - (x1 == 24?
Lib_IntVector_Intrinsics_vec256_rotate_left32_24(x0) : \ - _mm256_or_si256(_mm256_slli_epi32(x0,x1),_mm256_srli_epi32(x0,32-(x1))))))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right32(x0,x1) \ - (Lib_IntVector_Intrinsics_vec256_rotate_left32(x0,32-(x1))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(8,15,14,13,12,11,10,9,0,7,6,5,4,3,2,1,8,15,14,13,12,11,10,9,0,7,6,5,4,3,2,1))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(9,8,15,14,13,12,11,10,1,0,7,6,5,4,3,2,9,8,15,14,13,12,11,10,1,0,7,6,5,4,3,2))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(10,9,8,15,14,13,12,11,2,1,0,7,6,5,4,3,10,9,8,15,14,13,12,11,2,1,0,7,6,5,4,3))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(11,10,9,8,15,14,13,12,3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,3,2,1,0,7,6,5,4))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64_40(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(12,11,10,9,8,15,14,13,4,3,2,1,0,7,6,5,12,11,10,9,8,15,14,13,4,3,2,1,0,7,6,5))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64_48(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(13,12,11,10,9,8,15,14,5,4,3,2,1,0,7,6,13,12,11,10,9,8,15,14,5,4,3,2,1,0,7,6))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64_56(x0) \ - (_mm256_shuffle_epi8(x0, _mm256_set_epi8(14,13,12,11,10,9,8,15,6,5,4,3,2,1,0,7,14,13,12,11,10,9,8,15,6,5,4,3,2,1,0,7))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right64(x0,x1) \ - ((x1 == 8? Lib_IntVector_Intrinsics_vec256_rotate_right64_8(x0) : \ - (x1 == 16? Lib_IntVector_Intrinsics_vec256_rotate_right64_16(x0) : \ - (x1 == 24? Lib_IntVector_Intrinsics_vec256_rotate_right64_24(x0) : \ - (x1 == 32? Lib_IntVector_Intrinsics_vec256_rotate_right64_32(x0) : \ - (x1 == 40? Lib_IntVector_Intrinsics_vec256_rotate_right64_40(x0) : \ - (x1 == 48? Lib_IntVector_Intrinsics_vec256_rotate_right64_48(x0) : \ - (x1 == 56? 
Lib_IntVector_Intrinsics_vec256_rotate_right64_56(x0) : \ - _mm256_xor_si256(_mm256_srli_epi64((x0),(x1)),_mm256_slli_epi64((x0),(64-(x1)))))))))))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_left64(x0,x1) \ - (Lib_IntVector_Intrinsics_vec256_rotate_right64(x0,64-(x1))) - -#define Lib_IntVector_Intrinsics_vec256_shuffle64(x0, x1, x2, x3, x4) \ - (_mm256_permute4x64_epi64(x0, _MM_SHUFFLE(x4,x3,x2,x1))) - -#define Lib_IntVector_Intrinsics_vec256_shuffle32(x0, x1, x2, x3, x4, x5, x6, x7, x8) \ - (_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32(x8,x7,x6,x5,x4,x3,x2,x1))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right_lanes32(x0, x1) \ - (_mm256_permutevar8x32_epi32(x0, _mm256_set_epi32((x1+7)%8,(x1+6)%8,(x1+5)%8,(x1+4)%8,(x1+3%8),(x1+2)%8,(x1+1)%8,x1%8))) - -#define Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(x0, x1) \ - (_mm256_permute4x64_epi64(x0, _MM_SHUFFLE((x1+3)%4,(x1+2)%4,(x1+1)%4,x1%4))) - -#define Lib_IntVector_Intrinsics_vec256_load32_le(x0) \ - (_mm256_loadu_si256((__m256i*)(x0))) - -#define Lib_IntVector_Intrinsics_vec256_load64_le(x0) \ - (_mm256_loadu_si256((__m256i*)(x0))) - -#define Lib_IntVector_Intrinsics_vec256_load32_be(x0) \ - (_mm256_shuffle_epi8(_mm256_loadu_si256((__m256i*)(x0)), _mm256_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3))) - -#define Lib_IntVector_Intrinsics_vec256_load64_be(x0) \ - (_mm256_shuffle_epi8(_mm256_loadu_si256((__m256i*)(x0)), _mm256_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7))) - - -#define Lib_IntVector_Intrinsics_vec256_store32_le(x0, x1) \ - (_mm256_storeu_si256((__m256i*)(x0), x1)) - -#define Lib_IntVector_Intrinsics_vec256_store64_le(x0, x1) \ - (_mm256_storeu_si256((__m256i*)(x0), x1)) - -#define Lib_IntVector_Intrinsics_vec256_store32_be(x0, x1) \ - (_mm256_storeu_si256((__m256i*)(x0), _mm256_shuffle_epi8(x1, _mm256_set_epi8(12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3)))) - -#define Lib_IntVector_Intrinsics_vec256_store64_be(x0, x1) \ - (_mm256_storeu_si256((__m256i*)(x0), _mm256_shuffle_epi8(x1, _mm256_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7)))) - - -#define Lib_IntVector_Intrinsics_vec256_insert8(x0, x1, x2) \ - (_mm256_insert_epi8(x0, x1, x2)) - -#define Lib_IntVector_Intrinsics_vec256_insert32(x0, x1, x2) \ - (_mm256_insert_epi32(x0, x1, x2)) - -#define Lib_IntVector_Intrinsics_vec256_insert64(x0, x1, x2) \ - (_mm256_insert_epi64(x0, x1, x2)) - -#define Lib_IntVector_Intrinsics_vec256_extract8(x0, x1) \ - (_mm256_extract_epi8(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_extract32(x0, x1) \ - (_mm256_extract_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_extract64(x0, x1) \ - (_mm256_extract_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_zero \ - (_mm256_setzero_si256()) - -#define Lib_IntVector_Intrinsics_vec256_add64(x0, x1) \ - (_mm256_add_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_sub64(x0, x1) \ - (_mm256_sub_epi64(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_mul64(x0, x1) \ - (_mm256_mul_epu32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_smul64(x0, x1) \ - (_mm256_mul_epu32(x0, _mm256_set1_epi64x(x1))) - - -#define Lib_IntVector_Intrinsics_vec256_add32(x0, x1) \ - (_mm256_add_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_sub32(x0, x1) \ - 
(_mm256_sub_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_mul32(x0, x1) \ - (_mm256_mullo_epi32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec256_smul32(x0, x1) \ - (_mm256_mullo_epi32(x0, _mm256_set1_epi32(x1))) - - -#define Lib_IntVector_Intrinsics_vec256_load64(x1) \ - (_mm256_set1_epi64x(x1)) /* hi lo */ - -#define Lib_IntVector_Intrinsics_vec256_load64s(x0, x1, x2, x3) \ - (_mm256_set_epi64x(x3,x2,x1,x0)) /* hi lo */ - -#define Lib_IntVector_Intrinsics_vec256_load32(x) \ - (_mm256_set1_epi32(x)) - -#define Lib_IntVector_Intrinsics_vec256_load32s(x0,x1,x2,x3,x4, x5, x6, x7) \ - (_mm256_set_epi32(x7, x6, x5, x4, x3, x2, x1, x0)) /* hi lo */ - -#define Lib_IntVector_Intrinsics_vec256_load128(x) \ - (_mm256_set_m128i((__m128i)x)) - -#define Lib_IntVector_Intrinsics_vec256_load128s(x0,x1) \ - (_mm256_set_m128i((__m128i)x1,(__m128i)x0)) - -#define Lib_IntVector_Intrinsics_vec256_interleave_low32(x1, x2) \ - (_mm256_unpacklo_epi32(x1, x2)) - -#define Lib_IntVector_Intrinsics_vec256_interleave_high32(x1, x2) \ - (_mm256_unpackhi_epi32(x1, x2)) - -#define Lib_IntVector_Intrinsics_vec256_interleave_low64(x1, x2) \ - (_mm256_unpacklo_epi64(x1, x2)) - -#define Lib_IntVector_Intrinsics_vec256_interleave_high64(x1, x2) \ - (_mm256_unpackhi_epi64(x1, x2)) - -#define Lib_IntVector_Intrinsics_vec256_interleave_low128(x1, x2) \ - (_mm256_permute2x128_si256(x1, x2, 0x20)) - -#define Lib_IntVector_Intrinsics_vec256_interleave_high128(x1, x2) \ - (_mm256_permute2x128_si256(x1, x2, 0x31)) - -#endif /* HACL_CAN_COMPILE_VEC256 */ - -#elif (defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM)) \ - && !defined(__ARM_32BIT_STATE) - -#if defined(HACL_CAN_COMPILE_VEC128) - -#include <arm_neon.h> - -typedef uint32x4_t Lib_IntVector_Intrinsics_vec128; - -#define Lib_IntVector_Intrinsics_vec128_xor(x0, x1) \ - (veorq_u32(x0,x1)) - -#define Lib_IntVector_Intrinsics_vec128_eq64(x0, x1) \ - (vceqq_u32(x0,x1)) - -#define Lib_IntVector_Intrinsics_vec128_eq32(x0, x1) \ - (vceqq_u32(x0,x1)) - -#define Lib_IntVector_Intrinsics_vec128_gt32(x0, x1) \ - (vcgtq_u32(x0, x1)) - -#define high32(x0) \ - (vmovn_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0),32))) - -#define low32(x0) \ - (vmovn_u64(vreinterpretq_u64_u32(x0))) - -#define Lib_IntVector_Intrinsics_vec128_gt64(x0, x1) \ - (vreinterpretq_u32_u64(vmovl_u32(vorr_u32(vcgt_u32(high32(x0),high32(x1)),vand_u32(vceq_u32(high32(x0),high32(x1)),vcgt_u32(low32(x0),low32(x1))))))) - -#define Lib_IntVector_Intrinsics_vec128_or(x0, x1) \ - (vorrq_u32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_and(x0, x1) \ - (vandq_u32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_lognot(x0) \ - (vmvnq_u32(x0)) - - -#define Lib_IntVector_Intrinsics_vec128_shift_left(x0, x1) \ - (vextq_u32(x0, vdupq_n_u8(0), 16-(x1)/8)) - -#define Lib_IntVector_Intrinsics_vec128_shift_right(x0, x1) \ - (vextq_u32(x0, vdupq_n_u8(0), (x1)/8)) - -#define Lib_IntVector_Intrinsics_vec128_shift_left64(x0, x1) \ - (vreinterpretq_u32_u64(vshlq_n_u64(vreinterpretq_u64_u32(x0), x1))) - -#define Lib_IntVector_Intrinsics_vec128_shift_right64(x0, x1) \ - (vreinterpretq_u32_u64(vshrq_n_u64(vreinterpretq_u64_u32(x0), x1))) - -#define Lib_IntVector_Intrinsics_vec128_shift_left32(x0, x1) \ - (vshlq_n_u32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_shift_right32(x0, x1) \ - (vshrq_n_u32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x1) \ - (vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0,x1) \
- (((x1) == 16? Lib_IntVector_Intrinsics_vec128_rotate_left32_16(x0) : \ - vsriq_n_u32(vshlq_n_u32((x0),(x1)),(x0),32-(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right32_16(x1) \ - (vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0,x1) \ - (((x1) == 16? Lib_IntVector_Intrinsics_vec128_rotate_right32_16(x0) : \ - vsriq_n_u32(vshlq_n_u32((x0),32-(x1)),(x0),(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(x0, x1) \ - (vextq_u32(x0,x0,x1)) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes64(x0, x1) \ - (vextq_u64(x0,x0,x1)) - - -/* -#define Lib_IntVector_Intrinsics_vec128_shuffle32(x0, x1, x2, x3, x4) \ - (_mm_shuffle_epi32(x0, _MM_SHUFFLE(x1,x2,x3,x4))) - -#define Lib_IntVector_Intrinsics_vec128_shuffle64(x0, x1, x2) \ - (_mm_shuffle_epi32(x0, _MM_SHUFFLE(2*x1+1,2*x1,2*x2+1,2*x2))) -*/ - -#define Lib_IntVector_Intrinsics_vec128_load32_le(x0) \ - (vld1q_u32((const uint32_t*) (x0))) - -#define Lib_IntVector_Intrinsics_vec128_load64_le(x0) \ - (vld1q_u32((const uint32_t*) (x0))) - -#define Lib_IntVector_Intrinsics_vec128_store32_le(x0, x1) \ - (vst1q_u32((uint32_t*)(x0),(x1))) - -#define Lib_IntVector_Intrinsics_vec128_store64_le(x0, x1) \ - (vst1q_u32((uint32_t*)(x0),(x1))) - -/* -#define Lib_IntVector_Intrinsics_vec128_load_be(x0) \ - ( Lib_IntVector_Intrinsics_vec128 l = vrev64q_u8(vld1q_u32((uint32_t*)(x0))); - -*/ - -#define Lib_IntVector_Intrinsics_vec128_load32_be(x0) \ - (vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(vld1q_u32((const uint32_t*)(x0)))))) - -#define Lib_IntVector_Intrinsics_vec128_load64_be(x0) \ - (vreinterpretq_u32_u8(vrev64q_u8(vreinterpretq_u8_u32(vld1q_u32((const uint32_t*)(x0)))))) - -/* -#define Lib_IntVector_Intrinsics_vec128_store_be(x0, x1) \ - (_mm_storeu_si128((__m128i*)(x0), _mm_shuffle_epi8(x1, _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)))) -*/ - -#define Lib_IntVector_Intrinsics_vec128_store32_be(x0, x1) \ - (vst1q_u32((uint32_t*)(x0),(vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(x1)))))) - -#define Lib_IntVector_Intrinsics_vec128_store64_be(x0, x1) \ - (vst1q_u32((uint32_t*)(x0),(vreinterpretq_u32_u8(vrev64q_u8(vreinterpretq_u8_u32(x1)))))) - -#define Lib_IntVector_Intrinsics_vec128_insert8(x0, x1, x2) \ - (vsetq_lane_u8(x1,x0,x2)) - -#define Lib_IntVector_Intrinsics_vec128_insert32(x0, x1, x2) \ - (vsetq_lane_u32(x1,x0,x2)) - -#define Lib_IntVector_Intrinsics_vec128_insert64(x0, x1, x2) \ - (vreinterpretq_u32_u64(vsetq_lane_u64(x1,vreinterpretq_u64_u32(x0),x2))) - -#define Lib_IntVector_Intrinsics_vec128_extract8(x0, x1) \ - (vgetq_lane_u8(x0,x1)) - -#define Lib_IntVector_Intrinsics_vec128_extract32(x0, x1) \ - (vgetq_lane_u32(x0,x1)) - -#define Lib_IntVector_Intrinsics_vec128_extract64(x0, x1) \ - (vgetq_lane_u64(vreinterpretq_u64_u32(x0),x1)) - -#define Lib_IntVector_Intrinsics_vec128_zero \ - (vdupq_n_u32(0)) - -#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \ - (vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_sub64(x0, x1) \ - (vreinterpretq_u32_u64(vsubq_u64(vreinterpretq_u64_u32(x0), vreinterpretq_u64_u32(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_mul64(x0, x1) \ - (vreinterpretq_u32_u64(vmull_u32(vmovn_u64(vreinterpretq_u64_u32(x0)), vmovn_u64(vreinterpretq_u64_u32(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_smul64(x0, x1) \ - 
(vreinterpretq_u32_u64(vmull_n_u32(vmovn_u64(vreinterpretq_u64_u32(x0)), (uint32_t)x1))) - -#define Lib_IntVector_Intrinsics_vec128_add32(x0, x1) \ - (vaddq_u32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_sub32(x0, x1) \ - (vsubq_u32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_mul32(x0, x1) \ - (vmulq_lane_u32(x0, x1)) - -#define Lib_IntVector_Intrinsics_vec128_smul32(x0, x1) \ - (vmulq_lane_u32(x0, vdupq_n_u32(x1))) - -#define Lib_IntVector_Intrinsics_vec128_load128(x) \ - ((uint32x4_t)(x)) - -#define Lib_IntVector_Intrinsics_vec128_load64(x) \ - (vreinterpretq_u32_u64(vdupq_n_u64(x))) /* hi lo */ - -#define Lib_IntVector_Intrinsics_vec128_load32(x) \ - (vdupq_n_u32(x)) /* hi lo */ - -static inline Lib_IntVector_Intrinsics_vec128 Lib_IntVector_Intrinsics_vec128_load64s(uint64_t x1, uint64_t x2){ - const uint64_t a[2] = {x1,x2}; - return vreinterpretq_u32_u64(vld1q_u64(a)); -} - -static inline Lib_IntVector_Intrinsics_vec128 Lib_IntVector_Intrinsics_vec128_load32s(uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4){ - const uint32_t a[4] = {x1,x2,x3,x4}; - return vld1q_u32(a); -} - -#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x1, x2) \ - (vzip1q_u32(x1,x2)) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high32(x1, x2) \ - (vzip2q_u32(x1,x2)) - -#define Lib_IntVector_Intrinsics_vec128_interleave_low64(x1,x2) \ - (vreinterpretq_u32_u64(vzip1q_u64(vreinterpretq_u64_u32(x1),vreinterpretq_u64_u32(x2)))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x1,x2) \ - (vreinterpretq_u32_u64(vzip2q_u64(vreinterpretq_u64_u32(x1),vreinterpretq_u64_u32(x2)))) - -#endif /* HACL_CAN_COMPILE_VEC128 */ - -/* IBM z architecture */ -#elif defined(__s390x__) /* this flag is for GCC only */ - -#if defined(HACL_CAN_COMPILE_VEC128) - -#include <stdint.h> -#include <vecintrin.h> - -/* The main vector 128 type - * We can't use uint8_t, uint32_t, uint64_t... instead of unsigned char, - * unsigned int, unsigned long long: the compiler complains that the parameter - * combination is invalid.
*/ -typedef unsigned char vector128_8 __attribute__ ((vector_size(16))); -typedef unsigned int vector128_32 __attribute__ ((vector_size(16))); -typedef unsigned long long vector128_64 __attribute__ ((vector_size(16))); - -typedef vector128_8 Lib_IntVector_Intrinsics_vec128; -typedef vector128_8 vector128; - -#define Lib_IntVector_Intrinsics_vec128_load32_le(x) \ - (vector128) ((vector128_32) vec_revb(*((vector128_32*) (const uint8_t*)(x)))) - -#define Lib_IntVector_Intrinsics_vec128_load32_be(x) \ - (vector128) (*((vector128_32*) (const uint8_t*)(x))) - -#define Lib_IntVector_Intrinsics_vec128_load64_le(x) \ - (vector128) ((vector128_64) vec_revb(*((vector128_64*) (const uint8_t*)(x)))) - -static inline -void Lib_IntVector_Intrinsics_vec128_store32_le(const uint8_t *x0, vector128 x1) { - *((vector128_32*)x0) = vec_revb((vector128_32) x1); -} - -static inline -void Lib_IntVector_Intrinsics_vec128_store32_be(const uint8_t *x0, vector128 x1) { - *((vector128_32*)x0) = (vector128_32) x1; -} - -static inline -void Lib_IntVector_Intrinsics_vec128_store64_le(const uint8_t *x0, vector128 x1) { - *((vector128_64*)x0) = vec_revb((vector128_64) x1); -} - -#define Lib_IntVector_Intrinsics_vec128_add32(x0,x1) \ - ((vector128)((vector128_32)(((vector128_32)(x0)) + ((vector128_32)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \ - ((vector128)((vector128_64)(((vector128_64)(x0)) + ((vector128_64)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_and(x0, x1) \ - ((vector128)(vec_and((vector128)(x0),(vector128)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_eq32(x0, x1) \ - ((vector128)(vec_cmpeq(((vector128_32)(x0)),((vector128_32)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_eq64(x0, x1) \ - ((vector128)(vec_cmpeq(((vector128_64)(x0)),((vector128_64)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_extract32(x0, x1) \ - ((unsigned int)(vec_extract((vector128_32)(x0), x1))) - -#define Lib_IntVector_Intrinsics_vec128_extract64(x0, x1) \ - ((unsigned long long)(vec_extract((vector128_64)(x0), x1))) - -#define Lib_IntVector_Intrinsics_vec128_gt32(x0, x1) \ - ((vector128)((vector128_32)(((vector128_32)(x0)) > ((vector128_32)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_gt64(x0, x1) \ - ((vector128)((vector128_64)(((vector128_64)(x0)) > ((vector128_64)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_insert32(x0, x1, x2) \ - ((vector128)((vector128_32)vec_insert((unsigned int)(x1), (vector128_32)(x0), x2))) - -#define Lib_IntVector_Intrinsics_vec128_insert64(x0, x1, x2) \ - ((vector128)((vector128_64)vec_insert((unsigned long long)(x1), (vector128_64)(x0), x2))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high32(x0, x1) \ - ((vector128)((vector128_32)vec_mergel((vector128_32)(x0), (vector128_32)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x0, x1) \ - ((vector128)((vector128_64)vec_mergel((vector128_64)(x0), (vector128_64)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x0, x1) \ - ((vector128)((vector128_32)vec_mergeh((vector128_32)(x0), (vector128_32)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_low64(x0, x1) \ - ((vector128)((vector128_64)vec_mergeh((vector128_64)(x0), (vector128_64)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_load32(x) \ - ((vector128)((vector128_32){(unsigned int)(x), (unsigned int)(x), \ - (unsigned int)(x), (unsigned int)(x)})) - -#define Lib_IntVector_Intrinsics_vec128_load32s(x0, x1, x2, x3) \ - ((vector128)((vector128_32){(unsigned int)(x0),(unsigned int)(x1),(unsigned 
int)(x2),(unsigned int)(x3)})) - -#define Lib_IntVector_Intrinsics_vec128_load64(x) \ - ((vector128)((vector128_64)vec_load_pair((unsigned long long)(x),(unsigned long long)(x)))) - -#define Lib_IntVector_Intrinsics_vec128_lognot(x0) \ - ((vector128)(vec_xor((vector128)(x0), (vector128)vec_splat_u32(-1)))) - -#define Lib_IntVector_Intrinsics_vec128_mul64(x0, x1) \ - ((vector128)(vec_mulo((vector128_32)(x0), \ - (vector128_32)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_or(x0, x1) \ - ((vector128)(vec_or((vector128)(x0),(vector128)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \ - ((vector128)(vec_rli((vector128_32)(x0), (unsigned long)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \ - (Lib_IntVector_Intrinsics_vec128_rotate_left32(x0,(uint32_t)(32-(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(x0, x1) \ - ((vector128)(vec_sld((vector128)(x0), (vector128)(x0), (x1%4)*4))) - -#define Lib_IntVector_Intrinsics_vec128_shift_left64(x0, x1) \ - (((vector128)((vector128_64)vec_rli((vector128_64)(x0), (unsigned long)(x1)))) & \ - ((vector128)((vector128_64){0xffffffffffffffff << (x1), 0xffffffffffffffff << (x1)}))) - -#define Lib_IntVector_Intrinsics_vec128_shift_right64(x0, x1) \ - (((vector128)((vector128_64)vec_rli((vector128_64)(x0), (unsigned long)(64-(x1))))) & \ - ((vector128)((vector128_64){0xffffffffffffffff >> (x1), 0xffffffffffffffff >> (x1)}))) - -#define Lib_IntVector_Intrinsics_vec128_shift_right32(x0, x1) \ - (((vector128)((vector128_32)vec_rli((vector128_32)(x0), (unsigned int)(32-(x1))))) & \ - ((vector128)((vector128_32){0xffffffff >> (x1), 0xffffffff >> (x1), \ - 0xffffffff >> (x1), 0xffffffff >> (x1)}))) - -/* Doesn't work with vec_splat_u64 */ -#define Lib_IntVector_Intrinsics_vec128_smul64(x0, x1) \ - ((vector128)(Lib_IntVector_Intrinsics_vec128_mul64(x0,((vector128_64){(unsigned long long)(x1),(unsigned long long)(x1)})))) - -#define Lib_IntVector_Intrinsics_vec128_sub64(x0, x1) \ - ((vector128)((vector128_64)(x0) - (vector128_64)(x1))) - -static inline -vector128 Lib_IntVector_Intrinsics_vec128_xor(vector128 x0, vector128 x1) { - return ((vector128)(vec_xor((vector128)(x0), (vector128)(x1)))); -} - - -#define Lib_IntVector_Intrinsics_vec128_zero \ - ((vector128){}) - -#endif /* HACL_CAN_COMPILE_VEC128 */ - -#elif defined(__powerpc64__) // PowerPC 64 - this flag is for GCC only - -#if defined(HACL_CAN_COMPILE_VEC128) - -#include <altivec.h> -#include <string.h> // for memcpy -#include <stdint.h> - -// The main vector 128 type -// We can't use uint8_t, uint32_t, uint64_t... instead of unsigned char, -// unsigned int, unsigned long long: the compiler complains that the parameter -// combination is invalid.
-typedef vector unsigned char vector128_8; -typedef vector unsigned int vector128_32; -typedef vector unsigned long long vector128_64; - -typedef vector128_8 Lib_IntVector_Intrinsics_vec128; -typedef vector128_8 vector128; - -#define Lib_IntVector_Intrinsics_vec128_load32_le(x) \ - ((vector128)((vector128_32)(vec_xl(0, (const unsigned int*) ((const uint8_t*)(x)))))) - -#define Lib_IntVector_Intrinsics_vec128_load64_le(x) \ - ((vector128)((vector128_64)(vec_xl(0, (const unsigned long long*) ((const uint8_t*)(x)))))) - -#define Lib_IntVector_Intrinsics_vec128_store32_le(x0, x1) \ - (vec_xst((vector128_32)(x1), 0, (unsigned int*) ((uint8_t*)(x0)))) - -#define Lib_IntVector_Intrinsics_vec128_store64_le(x0, x1) \ - (vec_xst((vector128_64)(x1), 0, (unsigned long long*) ((uint8_t*)(x0)))) - -#define Lib_IntVector_Intrinsics_vec128_add32(x0,x1) \ - ((vector128)((vector128_32)(((vector128_32)(x0)) + ((vector128_32)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_add64(x0, x1) \ - ((vector128)((vector128_64)(((vector128_64)(x0)) + ((vector128_64)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_and(x0, x1) \ - ((vector128)(vec_and((vector128)(x0),(vector128)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_eq32(x0, x1) \ - ((vector128)(vec_cmpeq(((vector128_32)(x0)),((vector128_32)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_eq64(x0, x1) \ - ((vector128)(vec_cmpeq(((vector128_64)(x0)),((vector128_64)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_extract32(x0, x1) \ - ((unsigned int)(vec_extract((vector128_32)(x0), x1))) - -#define Lib_IntVector_Intrinsics_vec128_extract64(x0, x1) \ - ((unsigned long long)(vec_extract((vector128_64)(x0), x1))) - -#define Lib_IntVector_Intrinsics_vec128_gt32(x0, x1) \ - ((vector128)((vector128_32)(((vector128_32)(x0)) > ((vector128_32)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_gt64(x0, x1) \ - ((vector128)((vector128_64)(((vector128_64)(x0)) > ((vector128_64)(x1))))) - -#define Lib_IntVector_Intrinsics_vec128_insert32(x0, x1, x2) \ - ((vector128)((vector128_32)vec_insert((unsigned int)(x1), (vector128_32)(x0), x2))) - -#define Lib_IntVector_Intrinsics_vec128_insert64(x0, x1, x2) \ - ((vector128)((vector128_64)vec_insert((unsigned long long)(x1), (vector128_64)(x0), x2))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high32(x0, x1) \ - ((vector128)((vector128_32)vec_mergel((vector128_32)(x0), (vector128_32)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_high64(x0, x1) \ - ((vector128)((vector128_64)vec_mergel((vector128_64)(x0), (vector128_64)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_low32(x0, x1) \ - ((vector128)((vector128_32)vec_mergeh((vector128_32)(x0), (vector128_32)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_interleave_low64(x0, x1) \ - ((vector128)((vector128_64)vec_mergeh((vector128_64)(x0), (vector128_64)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_load32(x) \ - ((vector128)((vector128_32){(unsigned int)(x), (unsigned int)(x), \ - (unsigned int)(x), (unsigned int)(x)})) - -#define Lib_IntVector_Intrinsics_vec128_load32s(x0, x1, x2, x3) \ - ((vector128)((vector128_32){(unsigned int)(x0),(unsigned int)(x1),(unsigned int)(x2),(unsigned int)(x3)})) - -#define Lib_IntVector_Intrinsics_vec128_load64(x) \ - ((vector128)((vector128_64){(unsigned long long)(x),(unsigned long long)(x)})) - -#define Lib_IntVector_Intrinsics_vec128_lognot(x0) \ - ((vector128)(vec_xor((vector128)(x0), (vector128)vec_splat_u32(-1)))) - -#define Lib_IntVector_Intrinsics_vec128_mul64(x0, x1) \ - 
((vector128)(vec_mule((vector128_32)(x0), \ - (vector128_32)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_or(x0, x1) \ - ((vector128)(vec_or((vector128)(x0),(vector128)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_left32(x0, x1) \ - ((vector128)(vec_rl((vector128_32)(x0), (vector128_32){(unsigned int)(x1),(unsigned int)(x1),(unsigned int)(x1),(unsigned int)(x1)}))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right32(x0, x1) \ - (Lib_IntVector_Intrinsics_vec128_rotate_left32(x0,(uint32_t)(32-(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(x0, x1) \ - ((vector128)(vec_sld((vector128)(x0), (vector128)(x0), ((4-(x1))%4)*4))) - -#define Lib_IntVector_Intrinsics_vec128_shift_left64(x0, x1) \ - ((vector128)((vector128_64)vec_sl((vector128_64)(x0), (vector128_64){(unsigned long)(x1),(unsigned long)(x1)}))) - -#define Lib_IntVector_Intrinsics_vec128_shift_right64(x0, x1) \ - ((vector128)((vector128_64)vec_sr((vector128_64)(x0), (vector128_64){(unsigned long)(x1),(unsigned long)(x1)}))) - -// Doesn't work with vec_splat_u64 -#define Lib_IntVector_Intrinsics_vec128_smul64(x0, x1) \ - ((vector128)(Lib_IntVector_Intrinsics_vec128_mul64(x0,((vector128_64){(unsigned long long)(x1),(unsigned long long)(x1)})))) - -#define Lib_IntVector_Intrinsics_vec128_sub64(x0, x1) \ - ((vector128)((vector128_64)(x0) - (vector128_64)(x1))) - -#define Lib_IntVector_Intrinsics_vec128_xor(x0, x1) \ - ((vector128)(vec_xor((vector128)(x0), (vector128)(x1)))) - -#define Lib_IntVector_Intrinsics_vec128_zero \ - ((vector128){}) - -#endif /* HACL_CAN_COMPILE_VEC128 */ - -#endif // PowerPC64 - -// DEBUGGING: -// If libintvector_debug.h exists, use it to debug the current implementations. -// Note that some flags must be enabled for the debugging to be effective: -// see libintvector_debug.h for more details. 
-#if defined(__has_include) -#if __has_include("libintvector_debug.h") -#include "libintvector_debug.h" -#endif -#endif - -#endif // __Vec_Intrin_H diff --git a/dist/c89-compatible/poly1305-x86_64-darwin.S b/dist/c89-compatible/poly1305-x86_64-darwin.S deleted file mode 100644 index 1e9d242536..0000000000 --- a/dist/c89-compatible/poly1305-x86_64-darwin.S +++ /dev/null @@ -1,203 +0,0 @@ -.text -.global _x64_poly1305 -_x64_poly1305: - mov %rdi, %rax - mov %rsi, %r11 - movq %rcx, 184(%rdi) - push %rbx - push %rbp - push %rax - push %r11 - push %r12 - push %r13 - push %r14 - push %r15 - movq 24(%rdi), %r11 - movq 32(%rdi), %r12 - mov $1152921487695413247, %rcx - and %rcx, %r11 - mov $1152921487695413244, %rcx - and %rcx, %r12 - movq %r11, 24(%rdi) - movq %r12, 32(%rdi) - mov %rdx, %rax - and $15, %rax - sub %rax, %rdx - movq %rax, 56(%rdi) - movq %rdx, 64(%rdi) - mov $1, %rcx - shr $4, %rdx - mov %rdx, %r15 - movq 24(%rdi), %r11 - movq 32(%rdi), %r13 - movq 0(%rdi), %r14 - movq 8(%rdi), %rbx - movq 16(%rdi), %rbp - mov %r13, %r12 - shr $2, %r13 - mov %r12, %rax - add %r12, %r13 - jmp L1 -.balign 16 -L0: - addq 0(%rsi), %r14 - adcq 8(%rsi), %rbx - lea 16(%rsi), %rsi - adc %rcx, %rbp - mul %r14 - mov %rax, %r9 - mov %r11, %rax - mov %rdx, %r10 - mul %r14 - mov %rax, %r14 - mov %r11, %rax - mov %rdx, %r8 - mul %rbx - add %rax, %r9 - mov %r13, %rax - adc %rdx, %r10 - mul %rbx - mov %rbp, %rbx - add %rax, %r14 - adc %rdx, %r8 - imul %r13, %rbx - add %rbx, %r9 - mov %r8, %rbx - adc $0, %r10 - imul %r11, %rbp - add %r9, %rbx - mov $18446744073709551612, %rax - adc %rbp, %r10 - and %r10, %rax - mov %r10, %rbp - shr $2, %r10 - and $3, %rbp - add %r10, %rax - add %rax, %r14 - adc $0, %rbx - adc $0, %rbp - mov %r12, %rax - sub $1, %r15 -.balign 16 -L1: - cmp $0, %r15 - jne L0 - movq %r14, 0(%rdi) - movq %rbx, 8(%rdi) - movq %rbp, 16(%rdi) - movq 184(%rdi), %rax - cmp $1, %rax - jne L2 - movq 56(%rdi), %r15 - cmp $0, %r15 - je L4 - movq 32(%rdi), %rax - movq 0(%rsi), %r8 - movq 8(%rsi), %r9 - cmp $8, %r15 - jae L6 - mov %r15, %rcx - shl $3, %rcx - mov $1, %rdx - shl %cl, %rdx - mov %rdx, %rcx - sub $1, %rcx - and %rcx, %r8 - mov $0, %r9 - add %r8, %r14 - adc %r9, %rbx - adc $0, %rbp - add %rdx, %r14 - adc $0, %rbx - adc $0, %rbp - jmp L7 -L6: - mov %r15, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %rdx - shl %cl, %rdx - mov %rdx, %rcx - sub $1, %rcx - and %rcx, %r9 - add %r8, %r14 - adc %r9, %rbx - adc $0, %rbp - add $0, %r14 - adc %rdx, %rbx - adc $0, %rbp -L7: - mul %r14 - mov %rax, %r9 - mov %r11, %rax - mov %rdx, %r10 - mul %r14 - mov %rax, %r14 - mov %r11, %rax - mov %rdx, %r8 - mul %rbx - add %rax, %r9 - mov %r13, %rax - adc %rdx, %r10 - mul %rbx - mov %rbp, %rbx - add %rax, %r14 - adc %rdx, %r8 - imul %r13, %rbx - add %rbx, %r9 - mov %r8, %rbx - adc $0, %r10 - imul %r11, %rbp - add %r9, %rbx - mov $18446744073709551612, %rax - adc %rbp, %r10 - and %r10, %rax - mov %r10, %rbp - shr $2, %r10 - and $3, %rbp - add %r10, %rax - add %rax, %r14 - adc $0, %rbx - adc $0, %rbp - jmp L5 -L4: -L5: - mov %r14, %r8 - mov %rbx, %r9 - mov %rbp, %r10 - add $5, %r8 - adc $0, %r9 - adc $0, %r10 - shr $2, %r10 - mov %r10, %rax - sub $1, %rax - and %rax, %r14 - and %rax, %rbx - mov $0, %rax - sub %r10, %rax - and %rax, %r8 - and %rax, %r9 - add %r8, %r14 - add %r9, %rbx - movq 40(%rdi), %rax - movq 48(%rdi), %rdx - add %rax, %r14 - adc %rdx, %rbx - jmp L3 -L2: -L3: - movq %r14, 0(%rdi) - movq %rbx, 8(%rdi) - movq %rbp, 16(%rdi) - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %rsi - pop %rax - pop %rbp - pop %rbx - 
mov %rax, %rdi - ret - - diff --git a/dist/c89-compatible/poly1305-x86_64-linux.S b/dist/c89-compatible/poly1305-x86_64-linux.S deleted file mode 100644 index 8cd375cb89..0000000000 --- a/dist/c89-compatible/poly1305-x86_64-linux.S +++ /dev/null @@ -1,203 +0,0 @@ -.text -.global x64_poly1305 -x64_poly1305: - mov %rdi, %rax - mov %rsi, %r11 - movq %rcx, 184(%rdi) - push %rbx - push %rbp - push %rax - push %r11 - push %r12 - push %r13 - push %r14 - push %r15 - movq 24(%rdi), %r11 - movq 32(%rdi), %r12 - mov $1152921487695413247, %rcx - and %rcx, %r11 - mov $1152921487695413244, %rcx - and %rcx, %r12 - movq %r11, 24(%rdi) - movq %r12, 32(%rdi) - mov %rdx, %rax - and $15, %rax - sub %rax, %rdx - movq %rax, 56(%rdi) - movq %rdx, 64(%rdi) - mov $1, %rcx - shr $4, %rdx - mov %rdx, %r15 - movq 24(%rdi), %r11 - movq 32(%rdi), %r13 - movq 0(%rdi), %r14 - movq 8(%rdi), %rbx - movq 16(%rdi), %rbp - mov %r13, %r12 - shr $2, %r13 - mov %r12, %rax - add %r12, %r13 - jmp L1 -.balign 16 -L0: - addq 0(%rsi), %r14 - adcq 8(%rsi), %rbx - lea 16(%rsi), %rsi - adc %rcx, %rbp - mul %r14 - mov %rax, %r9 - mov %r11, %rax - mov %rdx, %r10 - mul %r14 - mov %rax, %r14 - mov %r11, %rax - mov %rdx, %r8 - mul %rbx - add %rax, %r9 - mov %r13, %rax - adc %rdx, %r10 - mul %rbx - mov %rbp, %rbx - add %rax, %r14 - adc %rdx, %r8 - imul %r13, %rbx - add %rbx, %r9 - mov %r8, %rbx - adc $0, %r10 - imul %r11, %rbp - add %r9, %rbx - mov $18446744073709551612, %rax - adc %rbp, %r10 - and %r10, %rax - mov %r10, %rbp - shr $2, %r10 - and $3, %rbp - add %r10, %rax - add %rax, %r14 - adc $0, %rbx - adc $0, %rbp - mov %r12, %rax - sub $1, %r15 -.balign 16 -L1: - cmp $0, %r15 - jne L0 - movq %r14, 0(%rdi) - movq %rbx, 8(%rdi) - movq %rbp, 16(%rdi) - movq 184(%rdi), %rax - cmp $1, %rax - jne L2 - movq 56(%rdi), %r15 - cmp $0, %r15 - je L4 - movq 32(%rdi), %rax - movq 0(%rsi), %r8 - movq 8(%rsi), %r9 - cmp $8, %r15 - jae L6 - mov %r15, %rcx - shl $3, %rcx - mov $1, %rdx - shl %cl, %rdx - mov %rdx, %rcx - sub $1, %rcx - and %rcx, %r8 - mov $0, %r9 - add %r8, %r14 - adc %r9, %rbx - adc $0, %rbp - add %rdx, %r14 - adc $0, %rbx - adc $0, %rbp - jmp L7 -L6: - mov %r15, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %rdx - shl %cl, %rdx - mov %rdx, %rcx - sub $1, %rcx - and %rcx, %r9 - add %r8, %r14 - adc %r9, %rbx - adc $0, %rbp - add $0, %r14 - adc %rdx, %rbx - adc $0, %rbp -L7: - mul %r14 - mov %rax, %r9 - mov %r11, %rax - mov %rdx, %r10 - mul %r14 - mov %rax, %r14 - mov %r11, %rax - mov %rdx, %r8 - mul %rbx - add %rax, %r9 - mov %r13, %rax - adc %rdx, %r10 - mul %rbx - mov %rbp, %rbx - add %rax, %r14 - adc %rdx, %r8 - imul %r13, %rbx - add %rbx, %r9 - mov %r8, %rbx - adc $0, %r10 - imul %r11, %rbp - add %r9, %rbx - mov $18446744073709551612, %rax - adc %rbp, %r10 - and %r10, %rax - mov %r10, %rbp - shr $2, %r10 - and $3, %rbp - add %r10, %rax - add %rax, %r14 - adc $0, %rbx - adc $0, %rbp - jmp L5 -L4: -L5: - mov %r14, %r8 - mov %rbx, %r9 - mov %rbp, %r10 - add $5, %r8 - adc $0, %r9 - adc $0, %r10 - shr $2, %r10 - mov %r10, %rax - sub $1, %rax - and %rax, %r14 - and %rax, %rbx - mov $0, %rax - sub %r10, %rax - and %rax, %r8 - and %rax, %r9 - add %r8, %r14 - add %r9, %rbx - movq 40(%rdi), %rax - movq 48(%rdi), %rdx - add %rax, %r14 - adc %rdx, %rbx - jmp L3 -L2: -L3: - movq %r14, 0(%rdi) - movq %rbx, 8(%rdi) - movq %rbp, 16(%rdi) - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %rsi - pop %rax - pop %rbp - pop %rbx - mov %rax, %rdi - ret - -.section .note.GNU-stack,"",%progbits diff --git a/dist/c89-compatible/poly1305-x86_64-mingw.S 
b/dist/c89-compatible/poly1305-x86_64-mingw.S deleted file mode 100644 index 6a2b334885..0000000000 --- a/dist/c89-compatible/poly1305-x86_64-mingw.S +++ /dev/null @@ -1,207 +0,0 @@ -.text -.global x64_poly1305 -x64_poly1305: - mov %rdi, %rax - mov %rsi, %r11 - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - mov %r9, %rcx - movq %rcx, 184(%rdi) - push %rbx - push %rbp - push %rax - push %r11 - push %r12 - push %r13 - push %r14 - push %r15 - movq 24(%rdi), %r11 - movq 32(%rdi), %r12 - mov $1152921487695413247, %rcx - and %rcx, %r11 - mov $1152921487695413244, %rcx - and %rcx, %r12 - movq %r11, 24(%rdi) - movq %r12, 32(%rdi) - mov %rdx, %rax - and $15, %rax - sub %rax, %rdx - movq %rax, 56(%rdi) - movq %rdx, 64(%rdi) - mov $1, %rcx - shr $4, %rdx - mov %rdx, %r15 - movq 24(%rdi), %r11 - movq 32(%rdi), %r13 - movq 0(%rdi), %r14 - movq 8(%rdi), %rbx - movq 16(%rdi), %rbp - mov %r13, %r12 - shr $2, %r13 - mov %r12, %rax - add %r12, %r13 - jmp L1 -.balign 16 -L0: - addq 0(%rsi), %r14 - adcq 8(%rsi), %rbx - lea 16(%rsi), %rsi - adc %rcx, %rbp - mul %r14 - mov %rax, %r9 - mov %r11, %rax - mov %rdx, %r10 - mul %r14 - mov %rax, %r14 - mov %r11, %rax - mov %rdx, %r8 - mul %rbx - add %rax, %r9 - mov %r13, %rax - adc %rdx, %r10 - mul %rbx - mov %rbp, %rbx - add %rax, %r14 - adc %rdx, %r8 - imul %r13, %rbx - add %rbx, %r9 - mov %r8, %rbx - adc $0, %r10 - imul %r11, %rbp - add %r9, %rbx - mov $18446744073709551612, %rax - adc %rbp, %r10 - and %r10, %rax - mov %r10, %rbp - shr $2, %r10 - and $3, %rbp - add %r10, %rax - add %rax, %r14 - adc $0, %rbx - adc $0, %rbp - mov %r12, %rax - sub $1, %r15 -.balign 16 -L1: - cmp $0, %r15 - jne L0 - movq %r14, 0(%rdi) - movq %rbx, 8(%rdi) - movq %rbp, 16(%rdi) - movq 184(%rdi), %rax - cmp $1, %rax - jne L2 - movq 56(%rdi), %r15 - cmp $0, %r15 - je L4 - movq 32(%rdi), %rax - movq 0(%rsi), %r8 - movq 8(%rsi), %r9 - cmp $8, %r15 - jae L6 - mov %r15, %rcx - shl $3, %rcx - mov $1, %rdx - shl %cl, %rdx - mov %rdx, %rcx - sub $1, %rcx - and %rcx, %r8 - mov $0, %r9 - add %r8, %r14 - adc %r9, %rbx - adc $0, %rbp - add %rdx, %r14 - adc $0, %rbx - adc $0, %rbp - jmp L7 -L6: - mov %r15, %rcx - sub $8, %rcx - shl $3, %rcx - mov $1, %rdx - shl %cl, %rdx - mov %rdx, %rcx - sub $1, %rcx - and %rcx, %r9 - add %r8, %r14 - adc %r9, %rbx - adc $0, %rbp - add $0, %r14 - adc %rdx, %rbx - adc $0, %rbp -L7: - mul %r14 - mov %rax, %r9 - mov %r11, %rax - mov %rdx, %r10 - mul %r14 - mov %rax, %r14 - mov %r11, %rax - mov %rdx, %r8 - mul %rbx - add %rax, %r9 - mov %r13, %rax - adc %rdx, %r10 - mul %rbx - mov %rbp, %rbx - add %rax, %r14 - adc %rdx, %r8 - imul %r13, %rbx - add %rbx, %r9 - mov %r8, %rbx - adc $0, %r10 - imul %r11, %rbp - add %r9, %rbx - mov $18446744073709551612, %rax - adc %rbp, %r10 - and %r10, %rax - mov %r10, %rbp - shr $2, %r10 - and $3, %rbp - add %r10, %rax - add %rax, %r14 - adc $0, %rbx - adc $0, %rbp - jmp L5 -L4: -L5: - mov %r14, %r8 - mov %rbx, %r9 - mov %rbp, %r10 - add $5, %r8 - adc $0, %r9 - adc $0, %r10 - shr $2, %r10 - mov %r10, %rax - sub $1, %rax - and %rax, %r14 - and %rax, %rbx - mov $0, %rax - sub %r10, %rax - and %rax, %r8 - and %rax, %r9 - add %r8, %r14 - add %r9, %rbx - movq 40(%rdi), %rax - movq 48(%rdi), %rdx - add %rax, %r14 - adc %rdx, %rbx - jmp L3 -L2: -L3: - movq %r14, 0(%rdi) - movq %rbx, 8(%rdi) - movq %rbp, 16(%rdi) - pop %r15 - pop %r14 - pop %r13 - pop %r12 - pop %rsi - pop %rax - pop %rbp - pop %rbx - mov %rax, %rdi - ret - - diff --git a/dist/c89-compatible/poly1305-x86_64-msvc.asm b/dist/c89-compatible/poly1305-x86_64-msvc.asm deleted file 
mode 100644 index 4eae42241f..0000000000 --- a/dist/c89-compatible/poly1305-x86_64-msvc.asm +++ /dev/null @@ -1,207 +0,0 @@ -.code -ALIGN 16 -x64_poly1305 proc - mov rax, rdi - mov r11, rsi - mov rdi, rcx - mov rsi, rdx - mov rdx, r8 - mov rcx, r9 - mov qword ptr [rdi + 184], rcx - push rbx - push rbp - push rax - push r11 - push r12 - push r13 - push r14 - push r15 - mov r11, qword ptr [rdi + 24] - mov r12, qword ptr [rdi + 32] - mov rcx, 1152921487695413247 - and r11, rcx - mov rcx, 1152921487695413244 - and r12, rcx - mov qword ptr [rdi + 24], r11 - mov qword ptr [rdi + 32], r12 - mov rax, rdx - and rax, 15 - sub rdx, rax - mov qword ptr [rdi + 56], rax - mov qword ptr [rdi + 64], rdx - mov rcx, 1 - shr rdx, 4 - mov r15, rdx - mov r11, qword ptr [rdi + 24] - mov r13, qword ptr [rdi + 32] - mov r14, qword ptr [rdi + 0] - mov rbx, qword ptr [rdi + 8] - mov rbp, qword ptr [rdi + 16] - mov r12, r13 - shr r13, 2 - mov rax, r12 - add r13, r12 - jmp L1 -ALIGN 16 -L0: - add r14, qword ptr [rsi + 0] - adc rbx, qword ptr [rsi + 8] - lea rsi, qword ptr [rsi + 16] - adc rbp, rcx - mul r14 - mov r9, rax - mov rax, r11 - mov r10, rdx - mul r14 - mov r14, rax - mov rax, r11 - mov r8, rdx - mul rbx - add r9, rax - mov rax, r13 - adc r10, rdx - mul rbx - mov rbx, rbp - add r14, rax - adc r8, rdx - imul rbx, r13 - add r9, rbx - mov rbx, r8 - adc r10, 0 - imul rbp, r11 - add rbx, r9 - mov rax, 18446744073709551612 - adc r10, rbp - and rax, r10 - mov rbp, r10 - shr r10, 2 - and rbp, 3 - add rax, r10 - add r14, rax - adc rbx, 0 - adc rbp, 0 - mov rax, r12 - sub r15, 1 -ALIGN 16 -L1: - cmp r15, 0 - jne L0 - mov qword ptr [rdi + 0], r14 - mov qword ptr [rdi + 8], rbx - mov qword ptr [rdi + 16], rbp - mov rax, qword ptr [rdi + 184] - cmp rax, 1 - jne L2 - mov r15, qword ptr [rdi + 56] - cmp r15, 0 - je L4 - mov rax, qword ptr [rdi + 32] - mov r8, qword ptr [rsi + 0] - mov r9, qword ptr [rsi + 8] - cmp r15, 8 - jae L6 - mov rcx, r15 - shl rcx, 3 - mov rdx, 1 - shl rdx, cl - mov rcx, rdx - sub rcx, 1 - and r8, rcx - mov r9, 0 - add r14, r8 - adc rbx, r9 - adc rbp, 0 - add r14, rdx - adc rbx, 0 - adc rbp, 0 - jmp L7 -L6: - mov rcx, r15 - sub rcx, 8 - shl rcx, 3 - mov rdx, 1 - shl rdx, cl - mov rcx, rdx - sub rcx, 1 - and r9, rcx - add r14, r8 - adc rbx, r9 - adc rbp, 0 - add r14, 0 - adc rbx, rdx - adc rbp, 0 -L7: - mul r14 - mov r9, rax - mov rax, r11 - mov r10, rdx - mul r14 - mov r14, rax - mov rax, r11 - mov r8, rdx - mul rbx - add r9, rax - mov rax, r13 - adc r10, rdx - mul rbx - mov rbx, rbp - add r14, rax - adc r8, rdx - imul rbx, r13 - add r9, rbx - mov rbx, r8 - adc r10, 0 - imul rbp, r11 - add rbx, r9 - mov rax, 18446744073709551612 - adc r10, rbp - and rax, r10 - mov rbp, r10 - shr r10, 2 - and rbp, 3 - add rax, r10 - add r14, rax - adc rbx, 0 - adc rbp, 0 - jmp L5 -L4: -L5: - mov r8, r14 - mov r9, rbx - mov r10, rbp - add r8, 5 - adc r9, 0 - adc r10, 0 - shr r10, 2 - mov rax, r10 - sub rax, 1 - and r14, rax - and rbx, rax - mov rax, 0 - sub rax, r10 - and r8, rax - and r9, rax - add r14, r8 - add rbx, r9 - mov rax, qword ptr [rdi + 40] - mov rdx, qword ptr [rdi + 48] - add r14, rax - adc rbx, rdx - jmp L3 -L2: -L3: - mov qword ptr [rdi + 0], r14 - mov qword ptr [rdi + 8], rbx - mov qword ptr [rdi + 16], rbp - pop r15 - pop r14 - pop r13 - pop r12 - pop rsi - pop rax - pop rbp - pop rbx - mov rdi, rax - ret -x64_poly1305 endp -end diff --git a/dist/c89-compatible/sha256-ppc64le.S b/dist/c89-compatible/sha256-ppc64le.S deleted file mode 100644 index 1a1873443d..0000000000 --- 
a/dist/c89-compatible/sha256-ppc64le.S +++ /dev/null @@ -1,1183 +0,0 @@ -.text -.global sha256_update -sha256_update: - - subi 1, 1, 176 - stxv 32+20, 0(1) - stxv 32+21, 16(1) - stxv 32+22, 32(1) - stxv 32+23, 48(1) - stxv 32+24, 64(1) - stxv 32+25, 80(1) - stxv 32+26, 96(1) - stxv 32+28, 112(1) - stxv 32+29, 128(1) - stxv 32+30, 144(1) - stxv 32+31, 160(1) - li 10, 16 - lxvw4x 32+16, 0, 3 - lxvw4x 32+20, 10, 3 - vsldoi 17, 16, 16, 4 - vsldoi 18, 16, 16, 8 - vsldoi 19, 16, 16, 12 - vsldoi 21, 20, 20, 4 - vsldoi 22, 20, 20, 8 - vsldoi 23, 20, 20, 12 - b L1 -.align 4 -L0: - xxmrghd 32+28, 32+16, 32+20 - xxmrghd 32+29, 32+17, 32+21 - xxmrghd 32+30, 32+18, 32+22 - xxmrghd 32+31, 32+19, 32+23 - lxvb16x 32+0, 0, 4 - lxvw4x 32+24, 0, 6 - addi 4, 4, 16 - addi 6, 6, 16 - vadduwm 23, 23, 24 - vsldoi 24, 24, 24, 4 - lxvb16x 32+4, 0, 4 - addi 4, 4, 16 - lxvb16x 32+8, 0, 4 - addi 4, 4, 16 - lxvb16x 32+12, 0, 4 - addi 4, 4, 16 - vsldoi 1, 0, 0, 4 - vsldoi 2, 0, 0, 8 - vsldoi 3, 0, 0, 12 - vadduwm 23, 23, 0 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 1 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 2 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 3 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 - vsldoi 5, 4, 4, 4 - vsldoi 6, 4, 4, 8 - vsldoi 7, 4, 4, 12 - vadduwm 19, 19, 4 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 4 - vadduwm 18, 18, 5 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 6 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 16, 16, 7 - vsel 25, 23, 22, 21 - vadduwm 23, 23, 24 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - vsldoi 24, 24, 24, 4 - vsldoi 9, 8, 8, 4 - vsldoi 10, 8, 8, 8 - vsldoi 11, 8, 8, 12 - vadduwm 23, 23, 8 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 
16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 9 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 10 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 11 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 - vsldoi 13, 12, 12, 4 - vsldoi 14, 12, 12, 8 - vsldoi 15, 12, 12, 12 - vadduwm 19, 19, 12 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 4 - vadduwm 18, 18, 13 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 14 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 16, 16, 15 - vsel 25, 23, 22, 21 - vadduwm 23, 23, 24 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 1, 0, 0 - vadduwm 0, 0, 25 - vshasigmaw 26, 14, 0, 15 - vadduwm 0, 0, 26 - vadduwm 0, 0, 9 - vshasigmaw 25, 2, 0, 0 - vadduwm 1, 1, 25 - vshasigmaw 26, 15, 0, 15 - vadduwm 1, 1, 26 - vadduwm 1, 1, 10 - vshasigmaw 25, 3, 0, 0 - vadduwm 2, 2, 25 - vshasigmaw 26, 0, 0, 15 - vadduwm 2, 2, 26 - vadduwm 2, 2, 11 - vshasigmaw 25, 4, 0, 0 - vadduwm 3, 3, 25 - vshasigmaw 26, 1, 0, 15 - vadduwm 3, 3, 26 - vadduwm 3, 3, 12 - vshasigmaw 25, 5, 0, 0 - vadduwm 4, 4, 25 - vshasigmaw 26, 2, 0, 15 - vadduwm 4, 4, 26 - vadduwm 4, 4, 13 - vadduwm 23, 23, 0 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 1 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 2 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 
22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 3 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 6, 0, 0 - vadduwm 5, 5, 25 - vshasigmaw 26, 3, 0, 15 - vadduwm 5, 5, 26 - vadduwm 5, 5, 14 - vshasigmaw 25, 7, 0, 0 - vadduwm 6, 6, 25 - vshasigmaw 26, 4, 0, 15 - vadduwm 6, 6, 26 - vadduwm 6, 6, 15 - vshasigmaw 25, 8, 0, 0 - vadduwm 7, 7, 25 - vshasigmaw 26, 5, 0, 15 - vadduwm 7, 7, 26 - vadduwm 7, 7, 0 - vshasigmaw 25, 9, 0, 0 - vadduwm 8, 8, 25 - vshasigmaw 26, 6, 0, 15 - vadduwm 8, 8, 26 - vadduwm 8, 8, 1 - vadduwm 19, 19, 4 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 4 - vadduwm 18, 18, 5 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 6 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 16, 16, 7 - vsel 25, 23, 22, 21 - vadduwm 23, 23, 24 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 10, 0, 0 - vadduwm 9, 9, 25 - vshasigmaw 26, 7, 0, 15 - vadduwm 9, 9, 26 - vadduwm 9, 9, 2 - vshasigmaw 25, 11, 0, 0 - vadduwm 10, 10, 25 - vshasigmaw 26, 8, 0, 15 - vadduwm 10, 10, 26 - vadduwm 10, 10, 3 - vshasigmaw 25, 12, 0, 0 - vadduwm 11, 11, 25 - vshasigmaw 26, 9, 0, 15 - vadduwm 11, 11, 26 - vadduwm 11, 11, 4 - vshasigmaw 25, 13, 0, 0 - vadduwm 12, 12, 25 - vshasigmaw 26, 10, 0, 15 - vadduwm 12, 12, 26 - vadduwm 12, 12, 5 - vadduwm 23, 23, 8 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 9 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 10 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 11 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 
- vshasigmaw 25, 14, 0, 0 - vadduwm 13, 13, 25 - vshasigmaw 26, 11, 0, 15 - vadduwm 13, 13, 26 - vadduwm 13, 13, 6 - vshasigmaw 25, 15, 0, 0 - vadduwm 14, 14, 25 - vshasigmaw 26, 12, 0, 15 - vadduwm 14, 14, 26 - vadduwm 14, 14, 7 - vshasigmaw 25, 0, 0, 0 - vadduwm 15, 15, 25 - vshasigmaw 26, 13, 0, 15 - vadduwm 15, 15, 26 - vadduwm 15, 15, 8 - vshasigmaw 25, 1, 0, 0 - vadduwm 0, 0, 25 - vshasigmaw 26, 14, 0, 15 - vadduwm 0, 0, 26 - vadduwm 0, 0, 9 - vadduwm 19, 19, 12 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 4 - vadduwm 18, 18, 13 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 14 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 16, 16, 15 - vsel 25, 23, 22, 21 - vadduwm 23, 23, 24 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 2, 0, 0 - vadduwm 1, 1, 25 - vshasigmaw 26, 15, 0, 15 - vadduwm 1, 1, 26 - vadduwm 1, 1, 10 - vshasigmaw 25, 3, 0, 0 - vadduwm 2, 2, 25 - vshasigmaw 26, 0, 0, 15 - vadduwm 2, 2, 26 - vadduwm 2, 2, 11 - vshasigmaw 25, 4, 0, 0 - vadduwm 3, 3, 25 - vshasigmaw 26, 1, 0, 15 - vadduwm 3, 3, 26 - vadduwm 3, 3, 12 - vshasigmaw 25, 5, 0, 0 - vadduwm 4, 4, 25 - vshasigmaw 26, 2, 0, 15 - vadduwm 4, 4, 26 - vadduwm 4, 4, 13 - vadduwm 23, 23, 0 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 1 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 2 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 3 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 6, 0, 0 - vadduwm 5, 5, 25 - vshasigmaw 26, 3, 0, 15 - vadduwm 5, 5, 26 - vadduwm 5, 5, 14 - vshasigmaw 25, 7, 0, 0 - vadduwm 6, 6, 25 - vshasigmaw 26, 4, 0, 15 - vadduwm 6, 6, 26 - vadduwm 6, 6, 15 - vshasigmaw 25, 8, 0, 0 - vadduwm 7, 7, 25 - vshasigmaw 26, 5, 0, 15 - vadduwm 7, 7, 26 - vadduwm 7, 7, 0 - vshasigmaw 25, 9, 0, 0 - vadduwm 8, 8, 
25 - vshasigmaw 26, 6, 0, 15 - vadduwm 8, 8, 26 - vadduwm 8, 8, 1 - vadduwm 19, 19, 4 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 4 - vadduwm 18, 18, 5 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 6 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 16, 16, 7 - vsel 25, 23, 22, 21 - vadduwm 23, 23, 24 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 10, 0, 0 - vadduwm 9, 9, 25 - vshasigmaw 26, 7, 0, 15 - vadduwm 9, 9, 26 - vadduwm 9, 9, 2 - vshasigmaw 25, 11, 0, 0 - vadduwm 10, 10, 25 - vshasigmaw 26, 8, 0, 15 - vadduwm 10, 10, 26 - vadduwm 10, 10, 3 - vshasigmaw 25, 12, 0, 0 - vadduwm 11, 11, 25 - vshasigmaw 26, 9, 0, 15 - vadduwm 11, 11, 26 - vadduwm 11, 11, 4 - vshasigmaw 25, 13, 0, 0 - vadduwm 12, 12, 25 - vshasigmaw 26, 10, 0, 15 - vadduwm 12, 12, 26 - vadduwm 12, 12, 5 - vadduwm 23, 23, 8 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 9 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 10 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 11 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 14, 0, 0 - vadduwm 13, 13, 25 - vshasigmaw 26, 11, 0, 15 - vadduwm 13, 13, 26 - vadduwm 13, 13, 6 - vshasigmaw 25, 15, 0, 0 - vadduwm 14, 14, 25 - vshasigmaw 26, 12, 0, 15 - vadduwm 14, 14, 26 - vadduwm 14, 14, 7 - vshasigmaw 25, 0, 0, 0 - vadduwm 15, 15, 25 - vshasigmaw 26, 13, 0, 15 - vadduwm 15, 15, 26 - vadduwm 15, 15, 8 - vshasigmaw 25, 1, 0, 0 - vadduwm 0, 0, 25 - vshasigmaw 26, 14, 0, 15 - vadduwm 0, 0, 26 - vadduwm 0, 0, 9 - vadduwm 19, 19, 12 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 
4 - vadduwm 18, 18, 13 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 14 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 16, 16, 15 - vsel 25, 23, 22, 21 - vadduwm 23, 23, 24 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 2, 0, 0 - vadduwm 1, 1, 25 - vshasigmaw 26, 15, 0, 15 - vadduwm 1, 1, 26 - vadduwm 1, 1, 10 - vshasigmaw 25, 3, 0, 0 - vadduwm 2, 2, 25 - vshasigmaw 26, 0, 0, 15 - vadduwm 2, 2, 26 - vadduwm 2, 2, 11 - vshasigmaw 25, 4, 0, 0 - vadduwm 3, 3, 25 - vshasigmaw 26, 1, 0, 15 - vadduwm 3, 3, 26 - vadduwm 3, 3, 12 - vshasigmaw 25, 5, 0, 0 - vadduwm 4, 4, 25 - vshasigmaw 26, 2, 0, 15 - vadduwm 4, 4, 26 - vadduwm 4, 4, 13 - vadduwm 23, 23, 0 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 1 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 2 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 3 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 6, 0, 0 - vadduwm 5, 5, 25 - vshasigmaw 26, 3, 0, 15 - vadduwm 5, 5, 26 - vadduwm 5, 5, 14 - vshasigmaw 25, 7, 0, 0 - vadduwm 6, 6, 25 - vshasigmaw 26, 4, 0, 15 - vadduwm 6, 6, 26 - vadduwm 6, 6, 15 - vshasigmaw 25, 8, 0, 0 - vadduwm 7, 7, 25 - vshasigmaw 26, 5, 0, 15 - vadduwm 7, 7, 26 - vadduwm 7, 7, 0 - vshasigmaw 25, 9, 0, 0 - vadduwm 8, 8, 25 - vshasigmaw 26, 6, 0, 15 - vadduwm 8, 8, 26 - vadduwm 8, 8, 1 - vadduwm 19, 19, 4 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 4 - vadduwm 18, 18, 5 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 6 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 
22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 16, 16, 7 - vsel 25, 23, 22, 21 - vadduwm 23, 23, 24 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 10, 0, 0 - vadduwm 9, 9, 25 - vshasigmaw 26, 7, 0, 15 - vadduwm 9, 9, 26 - vadduwm 9, 9, 2 - vshasigmaw 25, 11, 0, 0 - vadduwm 10, 10, 25 - vshasigmaw 26, 8, 0, 15 - vadduwm 10, 10, 26 - vadduwm 10, 10, 3 - vshasigmaw 25, 12, 0, 0 - vadduwm 11, 11, 25 - vshasigmaw 26, 9, 0, 15 - vadduwm 11, 11, 26 - vadduwm 11, 11, 4 - vshasigmaw 25, 13, 0, 0 - vadduwm 12, 12, 25 - vshasigmaw 26, 10, 0, 15 - vadduwm 12, 12, 26 - vadduwm 12, 12, 5 - vadduwm 23, 23, 8 - vsel 25, 22, 21, 20 - vadduwm 22, 22, 24 - vadduwm 23, 23, 25 - vshasigmaw 26, 20, 1, 15 - vadduwm 23, 23, 26 - vxor 25, 16, 17 - vsel 25, 17, 18, 25 - vadduwm 19, 19, 23 - vshasigmaw 26, 16, 1, 0 - vadduwm 26, 26, 25 - vadduwm 23, 23, 26 - vsldoi 24, 24, 24, 4 - vadduwm 22, 22, 9 - vsel 25, 21, 20, 19 - vadduwm 21, 21, 24 - vadduwm 22, 22, 25 - vshasigmaw 26, 19, 1, 15 - vadduwm 22, 22, 26 - vxor 25, 23, 16 - vsel 25, 16, 17, 25 - vadduwm 18, 18, 22 - vshasigmaw 26, 23, 1, 0 - vadduwm 26, 26, 25 - vadduwm 22, 22, 26 - vsldoi 24, 24, 24, 4 - vadduwm 21, 21, 10 - vsel 25, 20, 19, 18 - vadduwm 20, 20, 24 - vadduwm 21, 21, 25 - vshasigmaw 26, 18, 1, 15 - vadduwm 21, 21, 26 - vxor 25, 22, 23 - vsel 25, 23, 16, 25 - vadduwm 17, 17, 21 - vshasigmaw 26, 22, 1, 0 - vadduwm 26, 26, 25 - vadduwm 21, 21, 26 - lxvw4x 32+24, 0, 6 - addi 6, 6, 16 - vadduwm 20, 20, 11 - vsel 25, 19, 18, 17 - vadduwm 19, 19, 24 - vadduwm 20, 20, 25 - vshasigmaw 26, 17, 1, 15 - vadduwm 20, 20, 26 - vxor 25, 21, 22 - vsel 25, 22, 23, 25 - vadduwm 16, 16, 20 - vshasigmaw 26, 21, 1, 0 - vadduwm 26, 26, 25 - vadduwm 20, 20, 26 - vsldoi 24, 24, 24, 4 - vshasigmaw 25, 14, 0, 0 - vadduwm 13, 13, 25 - vshasigmaw 26, 11, 0, 15 - vadduwm 13, 13, 26 - vadduwm 13, 13, 6 - vshasigmaw 25, 15, 0, 0 - vadduwm 14, 14, 25 - vshasigmaw 26, 12, 0, 15 - vadduwm 14, 14, 26 - vadduwm 14, 14, 7 - vshasigmaw 25, 0, 0, 0 - vadduwm 15, 15, 25 - vshasigmaw 26, 13, 0, 15 - vadduwm 15, 15, 26 - vadduwm 15, 15, 8 - vadduwm 19, 19, 12 - vsel 25, 18, 17, 16 - vadduwm 18, 18, 24 - vadduwm 19, 19, 25 - vshasigmaw 26, 16, 1, 15 - vadduwm 19, 19, 26 - vxor 25, 20, 21 - vsel 25, 21, 22, 25 - vadduwm 23, 23, 19 - vshasigmaw 26, 20, 1, 0 - vadduwm 26, 26, 25 - vadduwm 19, 19, 26 - vsldoi 24, 24, 24, 4 - vadduwm 18, 18, 13 - vsel 25, 17, 16, 23 - vadduwm 17, 17, 24 - vadduwm 18, 18, 25 - vshasigmaw 26, 23, 1, 15 - vadduwm 18, 18, 26 - vxor 25, 19, 20 - vsel 25, 20, 21, 25 - vadduwm 22, 22, 18 - vshasigmaw 26, 19, 1, 0 - vadduwm 26, 26, 25 - vadduwm 18, 18, 26 - vsldoi 24, 24, 24, 4 - vadduwm 17, 17, 14 - vsel 25, 16, 23, 22 - vadduwm 16, 16, 24 - vadduwm 17, 17, 25 - vshasigmaw 26, 22, 1, 15 - vadduwm 17, 17, 26 - vxor 25, 18, 19 - vsel 25, 19, 20, 25 - vadduwm 21, 21, 17 - vshasigmaw 26, 18, 1, 0 - vadduwm 26, 26, 25 - vadduwm 17, 17, 26 - vadduwm 16, 16, 15 - vsel 25, 23, 22, 21 - vadduwm 16, 16, 25 - vshasigmaw 26, 21, 1, 15 - vadduwm 16, 16, 26 - vxor 25, 17, 18 - vsel 25, 18, 19, 25 - vadduwm 20, 20, 16 - vshasigmaw 26, 17, 1, 0 - vadduwm 26, 26, 25 - vadduwm 16, 16, 26 - subi 6, 6, 256 - vsldoi 0, 28, 28, 8 - vsldoi 1, 
29, 29, 8 - vsldoi 2, 30, 30, 8 - vsldoi 3, 31, 31, 8 - vadduwm 16, 16, 28 - vadduwm 17, 17, 29 - vadduwm 18, 18, 30 - vadduwm 19, 19, 31 - vadduwm 20, 20, 0 - vadduwm 21, 21, 1 - vadduwm 22, 22, 2 - vadduwm 23, 23, 3 - subi 5, 5, 1 -.align 4 -L1: - cmpldi 5, 0 - bgt L0 - vmrghw 16, 16, 17 - vmrghw 18, 18, 19 - xxmrghd 32+16, 32+16, 32+18 - vmrghw 20, 20, 21 - vmrghw 22, 22, 23 - xxmrghd 32+20, 32+20, 32+22 - li 10, 16 - stxvw4x 32+16, 0, 3 - stxvw4x 32+20, 10, 3 - lxv 32+20, 0(1) - lxv 32+21, 16(1) - lxv 32+22, 32(1) - lxv 32+23, 48(1) - lxv 32+24, 64(1) - lxv 32+25, 80(1) - lxv 32+26, 96(1) - lxv 32+28, 112(1) - lxv 32+29, 128(1) - lxv 32+30, 144(1) - lxv 32+31, 160(1) - addi 1, 1, 176 - - blr - -.section .note.GNU-stack,"",%progbits diff --git a/dist/c89-compatible/sha256-x86_64-darwin.S b/dist/c89-compatible/sha256-x86_64-darwin.S deleted file mode 100644 index de8b8e99d7..0000000000 --- a/dist/c89-compatible/sha256-x86_64-darwin.S +++ /dev/null @@ -1,257 +0,0 @@ -.text -.global _sha256_update -_sha256_update: - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - movdqu 0(%rdi), %xmm1 - movdqu 16(%rdi), %xmm2 - mov $289644378169868803, %rax - pinsrq $0, %rax, %xmm7 - mov $868365760874482187, %rax - pinsrq $1, %rax, %xmm7 - pshufd $27, %xmm1, %xmm0 - pshufd $177, %xmm1, %xmm1 - pshufd $27, %xmm2, %xmm2 - movdqu %xmm7, %xmm8 - palignr $8, %xmm2, %xmm1 - shufpd $0, %xmm0, %xmm2 - jmp L1 -.balign 16 -L0: - movdqu 0(%rsi), %xmm3 - movdqu 16(%rsi), %xmm4 - movdqu 32(%rsi), %xmm5 - pshufb %xmm7, %xmm3 - movdqu 48(%rsi), %xmm6 - movdqu 0(%rcx), %xmm0 - paddd %xmm3, %xmm0 - pshufb %xmm7, %xmm4 - movdqu %xmm2, %xmm10 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm1, %xmm9 - sha256rnds2 %xmm2, %xmm1 - movdqu 16(%rcx), %xmm0 - paddd %xmm4, %xmm0 - pshufb %xmm7, %xmm5 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - add $64, %rsi - sha256msg1 %xmm4, %xmm3 - sha256rnds2 %xmm2, %xmm1 - movdqu 32(%rcx), %xmm0 - paddd %xmm5, %xmm0 - pshufb %xmm7, %xmm6 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm6, %xmm7 - palignr $4, %xmm5, %xmm7 - paddd %xmm7, %xmm3 - sha256msg1 %xmm5, %xmm4 - sha256rnds2 %xmm2, %xmm1 - movdqu 48(%rcx), %xmm0 - paddd %xmm6, %xmm0 - sha256msg2 %xmm6, %xmm3 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm3, %xmm7 - palignr $4, %xmm6, %xmm7 - paddd %xmm7, %xmm4 - sha256msg1 %xmm6, %xmm5 - sha256rnds2 %xmm2, %xmm1 - movdqu 64(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 80(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 96(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - 
movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 112(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 128(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 144(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 160(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 176(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 192(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 208(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - sha256rnds2 %xmm2, %xmm1 - paddd %xmm7, %xmm5 - movdqu 224(%rcx), %xmm0 - paddd %xmm4, %xmm0 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - sha256msg2 %xmm4, %xmm5 - movdqu %xmm8, %xmm7 - sha256rnds2 %xmm2, %xmm1 - movdqu 240(%rcx), %xmm0 - paddd %xmm5, %xmm0 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - sub $1, %rdx - sha256rnds2 %xmm2, %xmm1 - paddd %xmm10, %xmm2 - paddd %xmm9, %xmm1 -.balign 16 -L1: - cmp $0, %rdx - ja L0 - pshufd $177, %xmm2, %xmm2 - pshufd $27, %xmm1, %xmm7 - pshufd $177, %xmm1, %xmm1 - shufpd $3, %xmm2, %xmm1 - palignr $8, %xmm7, %xmm2 - movdqu %xmm1, 0(%rdi) - movdqu %xmm2, 16(%rdi) - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - - diff --git a/dist/c89-compatible/sha256-x86_64-linux.S b/dist/c89-compatible/sha256-x86_64-linux.S deleted file mode 100644 index 8b83798ebf..0000000000 --- a/dist/c89-compatible/sha256-x86_64-linux.S +++ /dev/null @@ -1,257 +0,0 @@ -.text -.global sha256_update -sha256_update: - push 
%r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - movdqu 0(%rdi), %xmm1 - movdqu 16(%rdi), %xmm2 - mov $289644378169868803, %rax - pinsrq $0, %rax, %xmm7 - mov $868365760874482187, %rax - pinsrq $1, %rax, %xmm7 - pshufd $27, %xmm1, %xmm0 - pshufd $177, %xmm1, %xmm1 - pshufd $27, %xmm2, %xmm2 - movdqu %xmm7, %xmm8 - palignr $8, %xmm2, %xmm1 - shufpd $0, %xmm0, %xmm2 - jmp L1 -.balign 16 -L0: - movdqu 0(%rsi), %xmm3 - movdqu 16(%rsi), %xmm4 - movdqu 32(%rsi), %xmm5 - pshufb %xmm7, %xmm3 - movdqu 48(%rsi), %xmm6 - movdqu 0(%rcx), %xmm0 - paddd %xmm3, %xmm0 - pshufb %xmm7, %xmm4 - movdqu %xmm2, %xmm10 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm1, %xmm9 - sha256rnds2 %xmm2, %xmm1 - movdqu 16(%rcx), %xmm0 - paddd %xmm4, %xmm0 - pshufb %xmm7, %xmm5 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - add $64, %rsi - sha256msg1 %xmm4, %xmm3 - sha256rnds2 %xmm2, %xmm1 - movdqu 32(%rcx), %xmm0 - paddd %xmm5, %xmm0 - pshufb %xmm7, %xmm6 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm6, %xmm7 - palignr $4, %xmm5, %xmm7 - paddd %xmm7, %xmm3 - sha256msg1 %xmm5, %xmm4 - sha256rnds2 %xmm2, %xmm1 - movdqu 48(%rcx), %xmm0 - paddd %xmm6, %xmm0 - sha256msg2 %xmm6, %xmm3 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm3, %xmm7 - palignr $4, %xmm6, %xmm7 - paddd %xmm7, %xmm4 - sha256msg1 %xmm6, %xmm5 - sha256rnds2 %xmm2, %xmm1 - movdqu 64(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 80(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 96(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 112(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 128(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 144(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 
%xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 160(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 176(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 192(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 208(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - sha256rnds2 %xmm2, %xmm1 - paddd %xmm7, %xmm5 - movdqu 224(%rcx), %xmm0 - paddd %xmm4, %xmm0 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - sha256msg2 %xmm4, %xmm5 - movdqu %xmm8, %xmm7 - sha256rnds2 %xmm2, %xmm1 - movdqu 240(%rcx), %xmm0 - paddd %xmm5, %xmm0 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - sub $1, %rdx - sha256rnds2 %xmm2, %xmm1 - paddd %xmm10, %xmm2 - paddd %xmm9, %xmm1 -.balign 16 -L1: - cmp $0, %rdx - ja L0 - pshufd $177, %xmm2, %xmm2 - pshufd $27, %xmm1, %xmm7 - pshufd $177, %xmm1, %xmm1 - shufpd $3, %xmm2, %xmm1 - palignr $8, %xmm7, %xmm2 - movdqu %xmm1, 0(%rdi) - movdqu %xmm2, 16(%rdi) - pop %rbx - pop %rbp - pop %rdi - pop %rsi - pop %r12 - pop %r13 - pop %r14 - pop %r15 - ret - -.section .note.GNU-stack,"",%progbits diff --git a/dist/c89-compatible/sha256-x86_64-mingw.S b/dist/c89-compatible/sha256-x86_64-mingw.S deleted file mode 100644 index a833e05e17..0000000000 --- a/dist/c89-compatible/sha256-x86_64-mingw.S +++ /dev/null @@ -1,341 +0,0 @@ -.text -.global sha256_update -sha256_update: - pextrq $0, %xmm15, %rax - push %rax - pextrq $1, %xmm15, %rax - push %rax - pextrq $0, %xmm14, %rax - push %rax - pextrq $1, %xmm14, %rax - push %rax - pextrq $0, %xmm13, %rax - push %rax - pextrq $1, %xmm13, %rax - push %rax - pextrq $0, %xmm12, %rax - push %rax - pextrq $1, %xmm12, %rax - push %rax - pextrq $0, %xmm11, %rax - push %rax - pextrq $1, %xmm11, %rax - push %rax - pextrq $0, %xmm10, %rax - push %rax - pextrq $1, %xmm10, %rax - push %rax - pextrq $0, %xmm9, %rax - push %rax - pextrq $1, %xmm9, %rax - push %rax - pextrq $0, %xmm8, %rax - push %rax - pextrq $1, %xmm8, %rax - push %rax - pextrq $0, %xmm7, %rax - push %rax - pextrq $1, %xmm7, %rax - push %rax - pextrq $0, %xmm6, %rax - push %rax - pextrq $1, %xmm6, %rax - push %rax - push %r15 - push %r14 - push %r13 - push %r12 - push %rsi - push %rdi - push %rbp - push %rbx - mov %rcx, %rdi - mov %rdx, %rsi - mov %r8, %rdx - mov %r9, %rcx - movdqu 0(%rdi), %xmm1 - movdqu 16(%rdi), %xmm2 - mov $289644378169868803, %rax - pinsrq $0, 
%rax, %xmm7 - mov $868365760874482187, %rax - pinsrq $1, %rax, %xmm7 - pshufd $27, %xmm1, %xmm0 - pshufd $177, %xmm1, %xmm1 - pshufd $27, %xmm2, %xmm2 - movdqu %xmm7, %xmm8 - palignr $8, %xmm2, %xmm1 - shufpd $0, %xmm0, %xmm2 - jmp L1 -.balign 16 -L0: - movdqu 0(%rsi), %xmm3 - movdqu 16(%rsi), %xmm4 - movdqu 32(%rsi), %xmm5 - pshufb %xmm7, %xmm3 - movdqu 48(%rsi), %xmm6 - movdqu 0(%rcx), %xmm0 - paddd %xmm3, %xmm0 - pshufb %xmm7, %xmm4 - movdqu %xmm2, %xmm10 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm1, %xmm9 - sha256rnds2 %xmm2, %xmm1 - movdqu 16(%rcx), %xmm0 - paddd %xmm4, %xmm0 - pshufb %xmm7, %xmm5 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - add $64, %rsi - sha256msg1 %xmm4, %xmm3 - sha256rnds2 %xmm2, %xmm1 - movdqu 32(%rcx), %xmm0 - paddd %xmm5, %xmm0 - pshufb %xmm7, %xmm6 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm6, %xmm7 - palignr $4, %xmm5, %xmm7 - paddd %xmm7, %xmm3 - sha256msg1 %xmm5, %xmm4 - sha256rnds2 %xmm2, %xmm1 - movdqu 48(%rcx), %xmm0 - paddd %xmm6, %xmm0 - sha256msg2 %xmm6, %xmm3 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm3, %xmm7 - palignr $4, %xmm6, %xmm7 - paddd %xmm7, %xmm4 - sha256msg1 %xmm6, %xmm5 - sha256rnds2 %xmm2, %xmm1 - movdqu 64(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 80(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 96(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 112(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 128(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 144(%rcx), %xmm0 - paddd %xmm3, %xmm0 - sha256msg2 %xmm3, %xmm4 - sha256rnds2 %xmm1, %xmm2 - pshufd $14, %xmm0, %xmm0 - movdqu %xmm4, %xmm7 - palignr $4, %xmm3, %xmm7 - paddd %xmm7, %xmm5 - sha256msg1 %xmm3, %xmm6 - sha256rnds2 %xmm2, %xmm1 - movdqu %xmm3, %xmm7 - movdqu %xmm6, %xmm0 - movdqu %xmm4, %xmm3 - movdqu %xmm5, %xmm4 - movdqu %xmm7, %xmm6 - movdqu %xmm0, %xmm5 - movdqu 
160(%rcx), %xmm0
-  paddd %xmm3, %xmm0
-  sha256msg2 %xmm3, %xmm4
-  sha256rnds2 %xmm1, %xmm2
-  pshufd $14, %xmm0, %xmm0
-  movdqu %xmm4, %xmm7
-  palignr $4, %xmm3, %xmm7
-  paddd %xmm7, %xmm5
-  sha256msg1 %xmm3, %xmm6
-  sha256rnds2 %xmm2, %xmm1
-  movdqu %xmm3, %xmm7
-  movdqu %xmm6, %xmm0
-  movdqu %xmm4, %xmm3
-  movdqu %xmm5, %xmm4
-  movdqu %xmm7, %xmm6
-  movdqu %xmm0, %xmm5
-  movdqu 176(%rcx), %xmm0
-  paddd %xmm3, %xmm0
-  sha256msg2 %xmm3, %xmm4
-  sha256rnds2 %xmm1, %xmm2
-  pshufd $14, %xmm0, %xmm0
-  movdqu %xmm4, %xmm7
-  palignr $4, %xmm3, %xmm7
-  paddd %xmm7, %xmm5
-  sha256msg1 %xmm3, %xmm6
-  sha256rnds2 %xmm2, %xmm1
-  movdqu %xmm3, %xmm7
-  movdqu %xmm6, %xmm0
-  movdqu %xmm4, %xmm3
-  movdqu %xmm5, %xmm4
-  movdqu %xmm7, %xmm6
-  movdqu %xmm0, %xmm5
-  movdqu 192(%rcx), %xmm0
-  paddd %xmm3, %xmm0
-  sha256msg2 %xmm3, %xmm4
-  sha256rnds2 %xmm1, %xmm2
-  pshufd $14, %xmm0, %xmm0
-  movdqu %xmm4, %xmm7
-  palignr $4, %xmm3, %xmm7
-  paddd %xmm7, %xmm5
-  sha256msg1 %xmm3, %xmm6
-  sha256rnds2 %xmm2, %xmm1
-  movdqu %xmm3, %xmm7
-  movdqu %xmm6, %xmm0
-  movdqu %xmm4, %xmm3
-  movdqu %xmm5, %xmm4
-  movdqu %xmm7, %xmm6
-  movdqu %xmm0, %xmm5
-  movdqu 208(%rcx), %xmm0
-  paddd %xmm3, %xmm0
-  sha256msg2 %xmm3, %xmm4
-  sha256rnds2 %xmm1, %xmm2
-  pshufd $14, %xmm0, %xmm0
-  movdqu %xmm4, %xmm7
-  palignr $4, %xmm3, %xmm7
-  sha256rnds2 %xmm2, %xmm1
-  paddd %xmm7, %xmm5
-  movdqu 224(%rcx), %xmm0
-  paddd %xmm4, %xmm0
-  sha256rnds2 %xmm1, %xmm2
-  pshufd $14, %xmm0, %xmm0
-  sha256msg2 %xmm4, %xmm5
-  movdqu %xmm8, %xmm7
-  sha256rnds2 %xmm2, %xmm1
-  movdqu 240(%rcx), %xmm0
-  paddd %xmm5, %xmm0
-  sha256rnds2 %xmm1, %xmm2
-  pshufd $14, %xmm0, %xmm0
-  sub $1, %rdx
-  sha256rnds2 %xmm2, %xmm1
-  paddd %xmm10, %xmm2
-  paddd %xmm9, %xmm1
-.balign 16
-L1:
-  cmp $0, %rdx
-  ja L0
-  pshufd $177, %xmm2, %xmm2
-  pshufd $27, %xmm1, %xmm7
-  pshufd $177, %xmm1, %xmm1
-  shufpd $3, %xmm2, %xmm1
-  palignr $8, %xmm7, %xmm2
-  movdqu %xmm1, 0(%rdi)
-  movdqu %xmm2, 16(%rdi)
-  pop %rbx
-  pop %rbp
-  pop %rdi
-  pop %rsi
-  pop %r12
-  pop %r13
-  pop %r14
-  pop %r15
-  pop %rax
-  pinsrq $1, %rax, %xmm6
-  pop %rax
-  pinsrq $0, %rax, %xmm6
-  pop %rax
-  pinsrq $1, %rax, %xmm7
-  pop %rax
-  pinsrq $0, %rax, %xmm7
-  pop %rax
-  pinsrq $1, %rax, %xmm8
-  pop %rax
-  pinsrq $0, %rax, %xmm8
-  pop %rax
-  pinsrq $1, %rax, %xmm9
-  pop %rax
-  pinsrq $0, %rax, %xmm9
-  pop %rax
-  pinsrq $1, %rax, %xmm10
-  pop %rax
-  pinsrq $0, %rax, %xmm10
-  pop %rax
-  pinsrq $1, %rax, %xmm11
-  pop %rax
-  pinsrq $0, %rax, %xmm11
-  pop %rax
-  pinsrq $1, %rax, %xmm12
-  pop %rax
-  pinsrq $0, %rax, %xmm12
-  pop %rax
-  pinsrq $1, %rax, %xmm13
-  pop %rax
-  pinsrq $0, %rax, %xmm13
-  pop %rax
-  pinsrq $1, %rax, %xmm14
-  pop %rax
-  pinsrq $0, %rax, %xmm14
-  pop %rax
-  pinsrq $1, %rax, %xmm15
-  pop %rax
-  pinsrq $0, %rax, %xmm15
-  ret
-
-
diff --git a/dist/c89-compatible/sha256-x86_64-msvc.asm b/dist/c89-compatible/sha256-x86_64-msvc.asm
deleted file mode 100644
index 9bdde16927..0000000000
--- a/dist/c89-compatible/sha256-x86_64-msvc.asm
+++ /dev/null
@@ -1,341 +0,0 @@
-.code
-ALIGN 16
-sha256_update proc
-  pextrq rax, xmm15, 0
-  push rax
-  pextrq rax, xmm15, 1
-  push rax
-  pextrq rax, xmm14, 0
-  push rax
-  pextrq rax, xmm14, 1
-  push rax
-  pextrq rax, xmm13, 0
-  push rax
-  pextrq rax, xmm13, 1
-  push rax
-  pextrq rax, xmm12, 0
-  push rax
-  pextrq rax, xmm12, 1
-  push rax
-  pextrq rax, xmm11, 0
-  push rax
-  pextrq rax, xmm11, 1
-  push rax
-  pextrq rax, xmm10, 0
-  push rax
-  pextrq rax, xmm10, 1
-  push rax
-  pextrq rax, xmm9, 0
-  push rax
-  pextrq rax, xmm9, 1
-  push rax
-  pextrq rax, xmm8, 0
-  push rax
-  pextrq rax, xmm8, 1
-  push rax
-  pextrq rax, xmm7, 0
-  push rax
-  pextrq rax, xmm7, 1
-  push rax
-  pextrq rax, xmm6, 0
-  push rax
-  pextrq rax, xmm6, 1
-  push rax
-  push r15
-  push r14
-  push r13
-  push r12
-  push rsi
-  push rdi
-  push rbp
-  push rbx
-  mov rdi, rcx
-  mov rsi, rdx
-  mov rdx, r8
-  mov rcx, r9
-  movdqu xmm1, xmmword ptr [rdi + 0]
-  movdqu xmm2, xmmword ptr [rdi + 16]
-  mov rax, 289644378169868803
-  pinsrq xmm7, rax, 0
-  mov rax, 868365760874482187
-  pinsrq xmm7, rax, 1
-  pshufd xmm0, xmm1, 27
-  pshufd xmm1, xmm1, 177
-  pshufd xmm2, xmm2, 27
-  movdqu xmm8, xmm7
-  palignr xmm1, xmm2, 8
-  shufpd xmm2, xmm0, 0
-  jmp L1
-ALIGN 16
-L0:
-  movdqu xmm3, xmmword ptr [rsi + 0]
-  movdqu xmm4, xmmword ptr [rsi + 16]
-  movdqu xmm5, xmmword ptr [rsi + 32]
-  pshufb xmm3, xmm7
-  movdqu xmm6, xmmword ptr [rsi + 48]
-  movdqu xmm0, xmmword ptr [rcx + 0]
-  paddd xmm0, xmm3
-  pshufb xmm4, xmm7
-  movdqu xmm10, xmm2
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm9, xmm1
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 16]
-  paddd xmm0, xmm4
-  pshufb xmm5, xmm7
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  add rsi, 64
-  sha256msg1 xmm3, xmm4
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 32]
-  paddd xmm0, xmm5
-  pshufb xmm6, xmm7
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm6
-  palignr xmm7, xmm5, 4
-  paddd xmm3, xmm7
-  sha256msg1 xmm4, xmm5
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 48]
-  paddd xmm0, xmm6
-  sha256msg2 xmm3, xmm6
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm3
-  palignr xmm7, xmm6, 4
-  paddd xmm4, xmm7
-  sha256msg1 xmm5, xmm6
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 64]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 80]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 96]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 112]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 128]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 144]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 160]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 176]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 192]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  paddd xmm5, xmm7
-  sha256msg1 xmm6, xmm3
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm7, xmm3
-  movdqu xmm0, xmm6
-  movdqu xmm3, xmm4
-  movdqu xmm4, xmm5
-  movdqu xmm6, xmm7
-  movdqu xmm5, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 208]
-  paddd xmm0, xmm3
-  sha256msg2 xmm4, xmm3
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  movdqu xmm7, xmm4
-  palignr xmm7, xmm3, 4
-  sha256rnds2 xmm1, xmm2, xmm0
-  paddd xmm5, xmm7
-  movdqu xmm0, xmmword ptr [rcx + 224]
-  paddd xmm0, xmm4
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  sha256msg2 xmm5, xmm4
-  movdqu xmm7, xmm8
-  sha256rnds2 xmm1, xmm2, xmm0
-  movdqu xmm0, xmmword ptr [rcx + 240]
-  paddd xmm0, xmm5
-  sha256rnds2 xmm2, xmm1, xmm0
-  pshufd xmm0, xmm0, 14
-  sub rdx, 1
-  sha256rnds2 xmm1, xmm2, xmm0
-  paddd xmm2, xmm10
-  paddd xmm1, xmm9
-ALIGN 16
-L1:
-  cmp rdx, 0
-  ja L0
-  pshufd xmm2, xmm2, 177
-  pshufd xmm7, xmm1, 27
-  pshufd xmm1, xmm1, 177
-  shufpd xmm1, xmm2, 3
-  palignr xmm2, xmm7, 8
-  movdqu xmmword ptr [rdi + 0], xmm1
-  movdqu xmmword ptr [rdi + 16], xmm2
-  pop rbx
-  pop rbp
-  pop rdi
-  pop rsi
-  pop r12
-  pop r13
-  pop r14
-  pop r15
-  pop rax
-  pinsrq xmm6, rax, 1
-  pop rax
-  pinsrq xmm6, rax, 0
-  pop rax
-  pinsrq xmm7, rax, 1
-  pop rax
-  pinsrq xmm7, rax, 0
-  pop rax
-  pinsrq xmm8, rax, 1
-  pop rax
-  pinsrq xmm8, rax, 0
-  pop rax
-  pinsrq xmm9, rax, 1
-  pop rax
-  pinsrq xmm9, rax, 0
-  pop rax
-  pinsrq xmm10, rax, 1
-  pop rax
-  pinsrq xmm10, rax, 0
-  pop rax
-  pinsrq xmm11, rax, 1
-  pop rax
-  pinsrq xmm11, rax, 0
-  pop rax
-  pinsrq xmm12, rax, 1
-  pop rax
-  pinsrq xmm12, rax, 0
-  pop rax
-  pinsrq xmm13, rax, 1
-  pop rax
-  pinsrq xmm13, rax, 0
-  pop rax
-  pinsrq xmm14, rax, 1
-  pop rax
-  pinsrq xmm14, rax, 0
-  pop rax
-  pinsrq xmm15, rax, 1
-  pop rax
-  pinsrq xmm15, rax, 0
-  ret
-sha256_update endp
-end
diff --git a/doc/Obtaining.rst b/doc/Obtaining.rst
index 22301660a9..4cb5b33c0c 100644
--- a/doc/Obtaining.rst
+++ b/doc/Obtaining.rst
@@ -14,13 +14,8 @@ options passed to KaRaMeL (the F*-to-C compiler) that influence how the
 generated C code looks like.
 
 There is a total order on distributions:
 
-``c89-compatible <= msvc-compatible <= gcc-compatible <= gcc64-only``
+``msvc-compatible <= gcc-compatible <= gcc64-only``
 
-- The C89 distribution will work with the most C compilers; it relies on
-  ``alloca``; eliminates compound literals and enforces C89 scope to generate
-  syntactically C89-compliant code; code still relies on inttypes.h and other
-  headers that you may have to provide depending on your target. It does not
-  include Merkle Trees and the code is incredibly verbose.
 - The MSVC distribution relies on ``alloca`` to avoid C11 VLA for the sake
   of MSVC; relies on KaRaMeL for tail-call optimizations. It also does not use
   GCC inline assembly for Curve25519 and uses external linkage instead.
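
Editor's note on the ``alloca``-versus-VLA remark in the Obtaining.rst hunk above: MSVC does not implement variable-length arrays (optional since C11), so the msvc-compatible output allocates runtime-sized scratch buffers on the stack with ``alloca`` instead of declaring a VLA. The following C sketch is purely illustrative and not taken from the generated code; process_block, its signature, and the SCRATCH macro are hypothetical.

/* Illustrative only: the alloca substitution for a C11 VLA.
 * process_block and SCRATCH are hypothetical names. */
#include <stdint.h>
#include <string.h>
#if defined(_MSC_VER)
#include <malloc.h>  /* MSVC exposes alloca as _alloca here */
#define SCRATCH(len) ((uint8_t *)_alloca(len))
#else
#include <alloca.h>
#define SCRATCH(len) ((uint8_t *)alloca(len))
#endif

void process_block(const uint8_t *input, uint32_t len)
{
#if defined(_MSC_VER)
  /* MSVC path: stack allocation without VLA syntax. */
  uint8_t *scratch = SCRATCH(len);
#else
  /* C11/GNU path: a VLA, which MSVC would reject. */
  uint8_t scratch[len];
#endif
  memcpy(scratch, input, (size_t)len);
  /* ... work on scratch; storage is released on return ... */
}

Both variants yield stack storage freed on function return; the ``alloca`` form merely avoids the VLA syntax, which is why the MSVC distribution can keep stack discipline without requiring C11 array support.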