From c2cdda46243ba47769786f69d8150b63dc9a9047 Mon Sep 17 00:00:00 2001 From: czurnieden Date: Wed, 8 May 2024 17:24:20 +0200 Subject: [PATCH 1/2] Addition of rudimentary error tracing --- .github/workflows/main.yml | 4 ++ CMakeLists.txt | 5 ++ doc/bn.tex | 18 +++++ mp_2expt.c | 12 ++-- mp_abs.c | 9 ++- mp_add.c | 9 ++- mp_add_d.c | 11 ++- mp_addmod.c | 11 +-- mp_and.c | 10 +-- mp_complement.c | 5 +- mp_copy.c | 11 ++- mp_div.c | 17 ++--- mp_div_2.c | 10 +-- mp_div_2d.c | 17 +++-- mp_div_d.c | 29 +++++--- mp_dr_reduce.c | 14 ++-- mp_expt_n.c | 12 ++-- mp_exptmod.c | 38 +++++----- mp_exteuclid.c | 45 ++++++------ mp_fread.c | 19 +++-- mp_from_sbin.c | 9 ++- mp_from_ubin.c | 14 ++-- mp_fwrite.c | 16 ++--- mp_gcd.c | 55 +++++--------- mp_grow.c | 15 ++-- mp_init.c | 8 ++- mp_init_copy.c | 8 +-- mp_init_multi.c | 4 +- mp_init_set.c | 7 +- mp_init_size.c | 13 ++-- mp_invmod.c | 20 ++++-- mp_is_square.c | 45 +++++------- mp_kronecker.c | 46 ++++-------- mp_lcm.c | 25 +++---- mp_log.c | 79 +++++++++----------- mp_log_n.c | 6 +- mp_lshd.c | 9 ++- mp_mod_2d.c | 19 ++--- mp_montgomery_calc_normalization.c | 18 ++--- mp_montgomery_reduce.c | 14 ++-- mp_montgomery_setup.c | 7 +- mp_mul.c | 22 +++--- mp_mul_2.c | 10 +-- mp_mul_2d.c | 22 +++--- mp_mul_d.c | 18 ++--- mp_mulmod.c | 10 +-- mp_neg.c | 9 ++- mp_or.c | 10 +-- mp_pack.c | 19 +++-- mp_prime_fermat.c | 16 ++--- mp_prime_frobenius_underwood.c | 52 +++++++------- mp_prime_is_prime.c | 75 +++++++------------ mp_prime_miller_rabin.c | 44 +++++------- mp_prime_next_prime.c | 29 +++----- mp_prime_rand.c | 44 ++++-------- mp_prime_strong_lucas_selfridge.c | 104 +++++++++++++-------------- mp_radix_size.c | 12 ++-- mp_radix_size_overestimate.c | 11 ++- mp_rand.c | 19 +++-- mp_read_radix.c | 22 +++--- mp_reduce.c | 48 ++++--------- mp_reduce_2k.c | 25 +++---- mp_reduce_2k_l.c | 26 +++---- mp_reduce_2k_setup.c | 17 ++--- mp_reduce_2k_setup_l.c | 17 ++--- mp_reduce_setup.c | 10 +-- mp_root_n.c | 48 ++++++------- mp_shrink.c | 8 ++- mp_signed_rsh.c | 16 +++-- mp_sqrmod.c | 12 ++-- mp_sqrt.c | 42 ++++------- mp_sqrtmod_prime.c | 76 ++++++++++---------- mp_sub.c | 9 ++- mp_sub_d.c | 13 ++-- mp_submod.c | 12 ++-- mp_to_radix.c | 23 +++--- mp_to_sbin.c | 14 ++-- mp_to_ubin.c | 16 ++--- mp_unpack.c | 11 ++- mp_xor.c | 10 +-- s_mp_add.c | 10 +-- s_mp_copy_digs.c | 1 + s_mp_div_3.c | 9 ++- s_mp_div_recursive.c | 95 ++++++++++++------------ s_mp_div_school.c | 49 +++++++------ s_mp_div_small.c | 25 ++++--- s_mp_exptmod.c | 57 +++++++-------- s_mp_exptmod_fast.c | 73 ++++++++++--------- s_mp_fp_log.c | 46 ++++++------ s_mp_fp_log_d.c | 23 +++--- s_mp_invmod.c | 62 ++++++++-------- s_mp_invmod_odd.c | 49 +++++++------ s_mp_montgomery_reduce_comba.c | 13 ++-- s_mp_mul.c | 16 +++-- s_mp_mul_balance.c | 35 +++------ s_mp_mul_comba.c | 10 ++- s_mp_mul_high.c | 16 +++-- s_mp_mul_high_comba.c | 11 +-- s_mp_mul_karatsuba.c | 101 ++++++++++---------------- s_mp_mul_toom.c | 111 ++++++++++++++--------------- s_mp_prime_is_divisible.c | 12 ++-- s_mp_radix_size_overestimate.c | 15 ++-- s_mp_rand_platform.c | 2 + s_mp_sqr.c | 10 +-- s_mp_sqr_comba.c | 8 ++- s_mp_sqr_karatsuba.c | 75 +++++++++---------- s_mp_sqr_toom.c | 68 +++++++++--------- s_mp_sub.c | 10 +-- tommath_private.h | 11 +++ 109 files changed, 1314 insertions(+), 1453 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 18a832bbe..7975d86a8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -134,6 +134,10 @@ jobs: # clang for the x86-64 architecture with 
restricted limb sizes - { BUILDOPTIONS: '--with-cc=clang --cflags=-DMP_16BIT --limit-valgrind', SANITIZER: '1', COMPILE_DEBUG: '0', COMPILE_LTO: '0', CONV_WARNINGS: '', OTHERDEPS: 'clang llvm' } - { BUILDOPTIONS: '--with-cc=clang --cflags=-DMP_32BIT --limit-valgrind', SANITIZER: '1', COMPILE_DEBUG: '0', COMPILE_LTO: '0', CONV_WARNINGS: '', OTHERDEPS: 'clang llvm' } + + # Check error-tracing with 64 bit only; the macros are independent of the limb size with the exception of the S_MP_WORD_TOO_SMALL_C branch + - { BUILDOPTIONS: '--with-cc=gcc --with-m64 --cflags=-DMP_ADD_ERROR_TRACING --limit-valgrind', SANITIZER: '1', COMPILE_DEBUG: '0', COMPILE_LTO: '0', CONV_WARNINGS: '', OTHERDEPS: '' } + - { BUILDOPTIONS: '--with-cc=gcc --with-m64 --cflags=-DMP_ADD_ERROR_TRACING --cflags=-DS_MP_WORD_TOO_SMALL_C="" --limit-valgrind', SANITIZER: '1', COMPILE_DEBUG: '0', COMPILE_LTO: '0', CONV_WARNINGS: '', OTHERDEPS: '' } steps: - uses: actions/checkout@v3 - name: install dependencies diff --git a/CMakeLists.txt b/CMakeLists.txt index 014fb1883..159c9eef5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,6 +32,7 @@ include(sources.cmake) # Options #----------------------------------------------------------------------------- option(BUILD_SHARED_LIBS "Build shared library and only the shared library if \"ON\", default is static" OFF) +option(ADD_ERROR_TRACING "Add some simple macros that allow for primitive but useful error tracing if \"ON\", default is \"OFF\"" OFF) #----------------------------------------------------------------------------- # Compose CFLAGS @@ -76,6 +77,10 @@ if(CMAKE_SYSTEM_NAME MATCHES "CYGWIN") list(APPEND LTM_C_FLAGS -no-undefined) endif() +if(ADD_ERROR_TRACING) + list(APPEND LTM_C_FLAGS -DMP_ADD_ERROR_TRACING) +endif() + # TODO: coverage (lgcov) # If the user set the environment variables at generate-time, append them diff --git a/doc/bn.tex b/doc/bn.tex index 63e71633b..de3fa2202 100644 --- a/doc/bn.tex +++ b/doc/bn.tex @@ -362,6 +362,24 @@ \subsection{Small-Stack option} C.f. \ref{ch:SMALL_STACK_API} for the API description and further details. +\subsection{Error tracing} +\label{ch:ERROR_TRACING} +One error leads to another, and it can be tedious to trace a chain of errors back to its +source, especially in deeply recursive functions. The macro \texttt{MP\_TRACE\_ERROR} defined in +\texttt{tommath\_private.h} helps with this by printing the name of the source file, the line number, +and the name of the function to \texttt{stderr}. + +Enable this functionality by defining \texttt{MP\_ADD\_ERROR\_TRACING} with \texttt{make}, or by setting the \texttt{ADD\_ERROR\_TRACING} option with \texttt{cmake}. + +\index{MP\_TRACE\_ERROR} +\begin{alltt} +make "LCFLAGS= -DMP_ADD_ERROR_TRACING " +or +cmake -DADD_ERROR_TRACING=ON . +\end{alltt} + + + \section{Purpose of LibTomMath} Unlike GNU MP (GMP) Library, LIP, OpenSSL or various other commercial kits (Miracl), LibTomMath was not written with bleeding edge performance in mind.
First and foremost LibTomMath was written diff --git a/mp_2expt.c b/mp_2expt.c index c11fe4cb3..5828eff53 100644 --- a/mp_2expt.c +++ b/mp_2expt.c @@ -10,19 +10,18 @@ */ mp_err mp_2expt(mp_int *a, int b) { - mp_err err; + mp_err err = MP_OKAY; if (b < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* zero a as per default */ mp_zero(a); /* grow a to accommodate the single bit */ - if ((err = mp_grow(a, (b / MP_DIGIT_BIT) + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(a, (b / MP_DIGIT_BIT) + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* set the used count of where the bit will go */ a->used = (b / MP_DIGIT_BIT) + 1; @@ -30,6 +29,7 @@ mp_err mp_2expt(mp_int *a, int b) /* put the single bit in its place */ a->dp[b / MP_DIGIT_BIT] = (mp_digit)1 << (mp_digit)(b % MP_DIGIT_BIT); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_abs.c b/mp_abs.c index a87cc0cb9..1e9586f06 100644 --- a/mp_abs.c +++ b/mp_abs.c @@ -9,16 +9,15 @@ */ mp_err mp_abs(const mp_int *a, mp_int *b) { - mp_err err; + mp_err err = MP_OKAY; /* copy a to b */ - if ((err = mp_copy(a, b)) != MP_OKAY) { - return err; - } + if ((err = mp_copy(a, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* force the sign of b to positive */ b->sign = MP_ZPOS; - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_add.c b/mp_add.c index bf7a61e25..bd6ba84f0 100644 --- a/mp_add.c +++ b/mp_add.c @@ -6,12 +6,14 @@ /* high level addition (handles signs) */ mp_err mp_add(const mp_int *a, const mp_int *b, mp_int *c) { + mp_err err = MP_OKAY; /* handle two cases, not four */ if (a->sign == b->sign) { /* both positive or both negative */ /* add their magnitudes, copy the sign */ c->sign = a->sign; - return s_mp_add(a, b, c); + if ((err = s_mp_add(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* one positive, the other negative */ @@ -23,7 +25,10 @@ mp_err mp_add(const mp_int *a, const mp_int *b, mp_int *c) } c->sign = a->sign; - return s_mp_sub(a, b, c); + if ((err = s_mp_sub(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + +LTM_ERR: + return err; } #endif diff --git a/mp_add_d.c b/mp_add_d.c index c57a80db3..b464e62f5 100644 --- a/mp_add_d.c +++ b/mp_add_d.c @@ -6,7 +6,7 @@ /* single digit addition */ mp_err mp_add_d(const mp_int *a, mp_digit b, mp_int *c) { - mp_err err; + mp_err err = MP_OKAY; int oldused; /* fast path for a == c */ @@ -25,9 +25,7 @@ mp_err mp_add_d(const mp_int *a, mp_digit b, mp_int *c) } /* grow c as required */ - if ((err = mp_grow(c, a->used + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, a->used + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* if a is negative and |a| >= b, call c = |a| - b */ if (mp_isneg(a) && ((a->used > 1) || (a->dp[0] >= b))) { @@ -36,7 +34,7 @@ mp_err mp_add_d(const mp_int *a, mp_digit b, mp_int *c) a_.sign = MP_ZPOS; /* c = |a| - b */ - err = mp_sub_d(&a_, b, c); + if ((err = mp_sub_d(&a_, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* fix sign */ c->sign = MP_NEG; @@ -80,7 +78,8 @@ mp_err mp_add_d(const mp_int *a, mp_digit b, mp_int *c) s_mp_zero_digs(c->dp + c->used, oldused - c->used); mp_clamp(c); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_addmod.c b/mp_addmod.c index 91e2087e5..5fa5dcbaf 100644 --- a/mp_addmod.c +++ b/mp_addmod.c @@ -6,10 +6,11 @@ /* d = a + b (mod c) */ mp_err mp_addmod(const mp_int *a, const mp_int *b, const mp_int *c, mp_int *d) { - mp_err err; - if ((err = mp_add(a, b, d)) != MP_OKAY) { - return err; - } - return mp_mod(d, c, d); + mp_err 
err = MP_OKAY; + if ((err = mp_add(a, b, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_mod(d, c, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + +LTM_ERR: + return err; } #endif diff --git a/mp_and.c b/mp_and.c index b5230c4d1..5e8c49cb5 100644 --- a/mp_and.c +++ b/mp_and.c @@ -7,13 +7,11 @@ mp_err mp_and(const mp_int *a, const mp_int *b, mp_int *c) { int used = MP_MAX(a->used, b->used) + 1, i; - mp_err err; + mp_err err = MP_OKAY; mp_digit ac = 1, bc = 1, cc = 1; bool neg = (mp_isneg(a) && mp_isneg(b)); - if ((err = mp_grow(c, used)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); for (i = 0; i < used; i++) { mp_digit x, y; @@ -49,6 +47,8 @@ mp_err mp_and(const mp_int *a, const mp_int *b, mp_int *c) c->used = used; c->sign = (neg ? MP_NEG : MP_ZPOS); mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_complement.c b/mp_complement.c index c16e25f9d..d41c30c5f 100644 --- a/mp_complement.c +++ b/mp_complement.c @@ -6,8 +6,11 @@ /* b = ~a */ mp_err mp_complement(const mp_int *a, mp_int *b) { + mp_err err = MP_OKAY; mp_int a_ = *a; a_.sign = ((a_.sign == MP_ZPOS) && !mp_iszero(a)) ? MP_NEG : MP_ZPOS; - return mp_sub_d(&a_, 1uL, b); + if ((err = mp_sub_d(&a_, 1uL, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); +LTM_ERR: + return err; } #endif diff --git a/mp_copy.c b/mp_copy.c index d79e2b8bf..27f652984 100644 --- a/mp_copy.c +++ b/mp_copy.c @@ -6,17 +6,15 @@ /* copy, b = a */ mp_err mp_copy(const mp_int *a, mp_int *b) { - mp_err err; + mp_err err = MP_OKAY; /* if dst == src do nothing */ if (a == b) { - return MP_OKAY; + return err; } /* grow dest */ - if ((err = mp_grow(b, a->used)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(b, a->used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* copy everything over and zero high digits */ s_mp_copy_digs(b->dp, a->dp, a->used); @@ -24,6 +22,7 @@ mp_err mp_copy(const mp_int *a, mp_int *b) b->used = a->used; b->sign = a->sign; - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_div.c b/mp_div.c index b092d7bb0..9cd1a1587 100644 --- a/mp_div.c +++ b/mp_div.c @@ -5,19 +5,18 @@ mp_err mp_div(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) { - mp_err err; + mp_err err = MP_OKAY; /* is divisor zero ? 
*/ if (mp_iszero(b)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* if a < b then q = 0, r = a */ if (mp_cmp_mag(a, b) == MP_LT) { if (d != NULL) { - if ((err = mp_copy(a, d)) != MP_OKAY) { - return err; - } + if ((err = mp_copy(a, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } if (c != NULL) { mp_zero(c); @@ -28,15 +27,17 @@ mp_err mp_div(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) if (MP_HAS(S_MP_DIV_RECURSIVE) && (b->used > (2 * MP_MUL_KARATSUBA_CUTOFF)) && (b->used <= ((a->used/3)*2))) { - err = s_mp_div_recursive(a, b, c, d); + if ((err = s_mp_div_recursive(a, b, c, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if (MP_HAS(S_MP_DIV_SCHOOL)) { - err = s_mp_div_school(a, b, c, d); + if ((err = s_mp_div_school(a, b, c, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if (MP_HAS(S_MP_DIV_SMALL)) { - err = s_mp_div_small(a, b, c, d); + if ((err = s_mp_div_small(a, b, c, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else { err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } +LTM_ERR: return err; } #endif diff --git a/mp_div_2.c b/mp_div_2.c index 8ab9bcb9c..5ee8f5e7c 100644 --- a/mp_div_2.c +++ b/mp_div_2.c @@ -6,13 +6,11 @@ /* b = a/2 */ mp_err mp_div_2(const mp_int *a, mp_int *b) { - mp_err err; + mp_err err = MP_OKAY; int x, oldused; mp_digit r; - if ((err = mp_grow(b, a->used)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(b, a->used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); oldused = b->used; b->used = a->used; @@ -35,6 +33,8 @@ mp_err mp_div_2(const mp_int *a, mp_int *b) b->sign = a->sign; mp_clamp(b); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_div_2d.c b/mp_div_2d.c index e523465af..644898679 100644 --- a/mp_div_2d.c +++ b/mp_div_2d.c @@ -6,23 +6,20 @@ /* shift right by a certain bit count (store quotient in c, optional remainder in d) */ mp_err mp_div_2d(const mp_int *a, int b, mp_int *c, mp_int *d) { - mp_err err; + mp_err err = MP_OKAY; if (b < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } - if ((err = mp_copy(a, c)) != MP_OKAY) { - return err; - } + if ((err = mp_copy(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* 'a' should not be used after here - it might be the same as d */ /* get the remainder */ if (d != NULL) { - if ((err = mp_mod_2d(a, b, d)) != MP_OKAY) { - return err; - } + if ((err = mp_mod_2d(a, b, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } /* shift by as many digits in the bit count */ @@ -56,6 +53,8 @@ mp_err mp_div_2d(const mp_int *a, int b, mp_int *c, mp_int *d) } } mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_div_d.c b/mp_div_d.c index 5697e545c..5aee1956a 100644 --- a/mp_div_d.c +++ b/mp_div_d.c @@ -8,12 +8,13 @@ mp_err mp_div_d(const mp_int *a, mp_digit b, mp_int *c, mp_digit *d) { mp_int q; mp_word w; - mp_err err; + mp_err err = MP_OKAY; int ix; /* cannot divide by zero */ if (b == 0u) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* quick outs */ @@ -22,9 +23,9 @@ mp_err mp_div_d(const mp_int *a, mp_digit b, mp_int *c, mp_digit *d) *d = 0; } if (c != NULL) { - return mp_copy(a, c); + if ((err = mp_copy(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } - return MP_OKAY; + return err; } /* power of two ? */ @@ -32,7 +33,10 @@ mp_err mp_div_d(const mp_int *a, mp_digit b, mp_int *c, mp_digit *d) if (d != NULL) { *d = mp_isodd(a) ? 1u : 0u; } - return (c == NULL) ? 
MP_OKAY : mp_div_2(a, c); + if (c != NULL) { + if ((err = mp_div_2(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + } + return err; } if (MP_HAS(MP_DIV_2D) && MP_IS_2EXPT(b)) { ix = 1; @@ -42,18 +46,20 @@ mp_err mp_div_d(const mp_int *a, mp_digit b, mp_int *c, mp_digit *d) if (d != NULL) { *d = a->dp[0] & (((mp_digit)1<<(mp_digit)ix) - 1uL); } - return (c == NULL) ? MP_OKAY : mp_div_2d(a, ix, c, NULL); + if (c != NULL) { + if ((err = mp_div_2d(a, ix, c, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + } + return err; } /* three? */ if (MP_HAS(S_MP_DIV_3) && (b == 3u)) { - return s_mp_div_3(a, c, d); + if ((err = s_mp_div_3(a, c, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* no easy answer [c'est la vie]. Just division */ - if ((err = mp_init_size(&q, a->used)) != MP_OKAY) { - return err; - } + if ((err = mp_init_size(&q, a->used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); q.used = a->used; q.sign = a->sign; @@ -78,7 +84,8 @@ mp_err mp_div_d(const mp_int *a, mp_digit b, mp_int *c, mp_digit *d) } mp_clear(&q); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_dr_reduce.c b/mp_dr_reduce.c index f0f6f35e6..c8c9720f1 100644 --- a/mp_dr_reduce.c +++ b/mp_dr_reduce.c @@ -19,15 +19,13 @@ */ mp_err mp_dr_reduce(mp_int *x, const mp_int *n, mp_digit k) { - mp_err err; + mp_err err = MP_OKAY; /* m = digits in modulus */ int m = n->used; /* ensure that "x" has at least 2m digits */ - if ((err = mp_grow(x, m + m)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(x, m + m)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* top of loop, this is where the code resumes if * another reduction pass is required. @@ -59,10 +57,10 @@ mp_err mp_dr_reduce(mp_int *x, const mp_int *n, mp_digit k) break; } - if ((err = s_mp_sub(x, n, x)) != MP_OKAY) { - return err; - } + if ((err = s_mp_sub(x, n, x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_expt_n.c b/mp_expt_n.c index 93f9249a7..f93853486 100644 --- a/mp_expt_n.c +++ b/mp_expt_n.c @@ -6,7 +6,7 @@ /* calculate c = a**b using a square-multiply algorithm */ mp_err mp_expt_n(const mp_int *a, int b, mp_int *c) { - mp_err err; + mp_err err = MP_OKAY; mp_int g; if ((err = mp_init_copy(&g, a)) != MP_OKAY) { @@ -19,23 +19,19 @@ mp_err mp_expt_n(const mp_int *a, int b, mp_int *c) while (b > 0) { /* if the bit is set multiply */ if ((b & 1) != 0) { - if ((err = mp_mul(c, &g, c)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul(c, &g, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } /* square */ if (b > 1) { - if ((err = mp_sqr(&g, &g)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_sqr(&g, &g)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } /* shift to next bit */ b >>= 1; } -LBL_ERR: +LTM_ERR: mp_clear(&g); return err; } diff --git a/mp_exptmod.c b/mp_exptmod.c index eaab861eb..1800e26fe 100644 --- a/mp_exptmod.c +++ b/mp_exptmod.c @@ -11,38 +11,34 @@ mp_err mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y) { int dr; + mp_err err = MP_OKAY; /* modulus P must be positive */ if (mp_isneg(P)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR_END); } /* if exponent X is negative we have to recurse */ if (mp_isneg(X)) { mp_int tmpG, tmpX; - mp_err err; if (!MP_HAS(MP_INVMOD)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } - if ((err = mp_init_multi(&tmpG, &tmpX, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&tmpG, &tmpX, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* first compute 1/G 
mod P */ - if ((err = mp_invmod(G, P, &tmpG)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_invmod(G, P, &tmpG)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* now get |X| */ - if ((err = mp_abs(X, &tmpX)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_abs(X, &tmpX)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* and now compute (1/G)**|X| instead of G**X [X < 0] */ - err = mp_exptmod(&tmpG, &tmpX, P, Y); -LBL_ERR: + if ((err = mp_exptmod(&tmpG, &tmpX, P, Y)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); +LTM_ERR: mp_clear_multi(&tmpG, &tmpX, NULL); return err; } @@ -50,7 +46,8 @@ mp_err mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y) /* modified diminished radix reduction */ if (MP_HAS(MP_REDUCE_IS_2K_L) && MP_HAS(MP_REDUCE_2K_L) && MP_HAS(S_MP_EXPTMOD) && mp_reduce_is_2k_l(P)) { - return s_mp_exptmod(G, X, P, Y, 1); + if ((err = s_mp_exptmod(G, X, P, Y, 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + return err; } /* is it a DR modulus? default to no */ @@ -63,16 +60,21 @@ mp_err mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y) /* if the modulus is odd or dr != 0 use the montgomery method */ if (MP_HAS(S_MP_EXPTMOD_FAST) && (mp_isodd(P) || (dr != 0))) { - return s_mp_exptmod_fast(G, X, P, Y, dr); + if ((err = s_mp_exptmod_fast(G, X, P, Y, dr)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* otherwise use the generic Barrett reduction technique */ if (MP_HAS(S_MP_EXPTMOD)) { - return s_mp_exptmod(G, X, P, Y, 0); + if ((err = s_mp_exptmod(G, X, P, Y, 0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* no exptmod for evens */ - return MP_VAL; + err = MP_VAL; + +LTM_ERR_END: + return err; } #endif diff --git a/mp_exteuclid.c b/mp_exteuclid.c index 649c4ca85..cb83f89e6 100644 --- a/mp_exteuclid.c +++ b/mp_exteuclid.c @@ -9,49 +9,48 @@ mp_err mp_exteuclid(const mp_int *a, const mp_int *b, mp_int *U1, mp_int *U2, mp_int *U3) { mp_int u1, u2, u3, v1, v2, v3, t1, t2, t3, q, tmp; - mp_err err; + mp_err err = MP_OKAY; - if ((err = mp_init_multi(&u1, &u2, &u3, &v1, &v2, &v3, &t1, &t2, &t3, &q, &tmp, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&u1, &u2, &u3, &v1, &v2, &v3, &t1, &t2, &t3, &q, &tmp, NULL)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); /* initialize, (u1,u2,u3) = (1,0,a) */ mp_set(&u1, 1uL); - if ((err = mp_copy(a, &u3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(a, &u3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* initialize, (v1,v2,v3) = (0,1,b) */ mp_set(&v2, 1uL); - if ((err = mp_copy(b, &v3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(b, &v3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* loop while v3 != 0 */ while (!mp_iszero(&v3)) { /* q = u3/v3 */ - if ((err = mp_div(&u3, &v3, &q, NULL)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div(&u3, &v3, &q, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* (t1,t2,t3) = (u1,u2,u3) - (v1,v2,v3)q */ - if ((err = mp_mul(&v1, &q, &tmp)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_sub(&u1, &tmp, &t1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_mul(&v2, &q, &tmp)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_sub(&u2, &tmp, &t2)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_mul(&v3, &q, &tmp)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_sub(&u3, &tmp, &t3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&v1, &q, &tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_sub(&u1, &tmp, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_mul(&v2, &q, &tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_sub(&u2, &tmp, &t2)) 
!= MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_mul(&v3, &q, &tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_sub(&u3, &tmp, &t3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* (u1,u2,u3) = (v1,v2,v3) */ - if ((err = mp_copy(&v1, &u1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_copy(&v2, &u2)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_copy(&v3, &u3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(&v1, &u1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_copy(&v2, &u2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_copy(&v3, &u3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* (v1,v2,v3) = (t1,t2,t3) */ - if ((err = mp_copy(&t1, &v1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_copy(&t2, &v2)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_copy(&t3, &v3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(&t1, &v1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_copy(&t2, &v2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_copy(&t3, &v3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } /* make sure U3 >= 0 */ if (mp_isneg(&u3)) { - if ((err = mp_neg(&u1, &u1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_neg(&u2, &u2)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_neg(&u3, &u3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_neg(&u1, &u1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_neg(&u2, &u2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_neg(&u3, &u3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } /* copy result out */ @@ -65,7 +64,7 @@ mp_err mp_exteuclid(const mp_int *a, const mp_int *b, mp_int *U1, mp_int *U2, mp mp_exch(U3, &u3); } -LBL_ERR: +LTM_ERR: mp_clear_multi(&u1, &u2, &u3, &v1, &v2, &v3, &t1, &t2, &t3, &q, &tmp, NULL); return err; } diff --git a/mp_fread.c b/mp_fread.c index 53c35e822..1bd4eb663 100644 --- a/mp_fread.c +++ b/mp_fread.c @@ -7,13 +7,14 @@ /* read a bigint from a file stream in ASCII */ mp_err mp_fread(mp_int *a, int radix, FILE *stream) { - mp_err err; + mp_err err = MP_OKAY; mp_sign sign = MP_ZPOS; int ch; /* make sure the radix is ok */ if ((radix < 2) || (radix > 64)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* if first digit is - then set negative */ @@ -25,7 +26,8 @@ mp_err mp_fread(mp_int *a, int radix, FILE *stream) /* no digits, return error */ if (ch == EOF) { - return MP_ERR; + err = MP_ERR; + MP_TRACE_ERROR(err, LTM_ERR); } /* clear a */ @@ -47,19 +49,16 @@ mp_err mp_fread(mp_int *a, int radix, FILE *stream) } /* shift up and add */ - if ((err = mp_mul_d(a, (mp_digit)radix, a)) != MP_OKAY) { - return err; - } - if ((err = mp_add_d(a, y, a)) != MP_OKAY) { - return err; - } + if ((err = mp_mul_d(a, (mp_digit)radix, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_add_d(a, y, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } while ((ch = fgetc(stream)) != EOF); if (!mp_iszero(a)) { a->sign = sign; } - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_from_sbin.c b/mp_from_sbin.c index 26eb0f120..81d0ca1ac 100644 --- a/mp_from_sbin.c +++ b/mp_from_sbin.c @@ -6,16 +6,15 @@ /* read signed bin, big endian, first byte is 0==positive or 1==negative */ mp_err mp_from_sbin(mp_int *a, const uint8_t *buf, size_t size) { - mp_err err; + mp_err err = MP_OKAY; /* read magnitude */ - if ((err = mp_from_ubin(a, buf + 1, size - 1u)) != MP_OKAY) { - return err; - } + if ((err = mp_from_ubin(a, buf + 1, size - 1u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* first byte is 0 for positive, non-zero for negative */ a->sign = (buf[0] != (uint8_t)0) ? 
MP_NEG : MP_ZPOS; - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_from_ubin.c b/mp_from_ubin.c index 8272185b8..39d43d754 100644 --- a/mp_from_ubin.c +++ b/mp_from_ubin.c @@ -6,25 +6,23 @@ /* reads a uint8_t array, assumes the msb is stored first [big endian] */ mp_err mp_from_ubin(mp_int *a, const uint8_t *buf, size_t size) { - mp_err err; + mp_err err = MP_OKAY; /* make sure there are at least two digits */ - if ((err = mp_grow(a, 2)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(a, 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* zero the int */ mp_zero(a); /* read the bytes in */ while (size-- > 0u) { - if ((err = mp_mul_2d(a, 8, a)) != MP_OKAY) { - return err; - } + if ((err = mp_mul_2d(a, 8, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); a->dp[0] |= *buf++; a->used += 1; } mp_clamp(a); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_fwrite.c b/mp_fwrite.c index 8ea9d327e..e0d3a9eda 100644 --- a/mp_fwrite.c +++ b/mp_fwrite.c @@ -7,26 +7,26 @@ mp_err mp_fwrite(const mp_int *a, int radix, FILE *stream) { char *buf; - mp_err err; + mp_err err = MP_OKAY; size_t size, written; - if ((err = mp_radix_size_overestimate(a, radix, &size)) != MP_OKAY) { - return err; - } + if ((err = mp_radix_size_overestimate(a, radix, &size)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); buf = (char *) MP_MALLOC(size); - if (buf == NULL) { - return MP_MEM; - } + if (buf == NULL) MP_TRACE_ERROR(MP_MEM, LTM_ERR); if ((err = mp_to_radix(a, buf, size, &written, radix)) == MP_OKAY) { written--; if (fwrite(buf, written, 1uL, stream) != 1uL) { err = MP_ERR; + MP_TRACE_ERROR(err, LTM_ERR_FREE); } - } + /* For the stacktracing */ + } else MP_TRACE_ERROR(err, LTM_ERR_FREE); +LTM_ERR_FREE: MP_FREE_BUF(buf, size); +LTM_ERR: return err; } #endif diff --git a/mp_gcd.c b/mp_gcd.c index 4f6b6cc29..f1a992528 100644 --- a/mp_gcd.c +++ b/mp_gcd.c @@ -8,24 +8,22 @@ mp_err mp_gcd(const mp_int *a, const mp_int *b, mp_int *c) { mp_int u, v; int k, u_lsb, v_lsb; - mp_err err; + mp_err err = MP_OKAY; /* either zero than gcd is the largest */ if (mp_iszero(a)) { - return mp_abs(b, c); + if ((err = mp_abs(b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } if (mp_iszero(b)) { - return mp_abs(a, c); + if ((err = mp_abs(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* get copies of a and b we can modify */ - if ((err = mp_init_copy(&u, a)) != MP_OKAY) { - return err; - } + if ((err = mp_init_copy(&u, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_init_copy(&v, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_U); - if ((err = mp_init_copy(&v, b)) != MP_OKAY) { - goto LBL_U; - } /* must be positive for the remainder of the algorithm */ u.sign = v.sign = MP_ZPOS; @@ -37,26 +35,17 @@ mp_err mp_gcd(const mp_int *a, const mp_int *b, mp_int *c) if (k > 0) { /* divide the power of two out */ - if ((err = mp_div_2d(&u, k, &u, NULL)) != MP_OKAY) { - goto LBL_V; - } - - if ((err = mp_div_2d(&v, k, &v, NULL)) != MP_OKAY) { - goto LBL_V; - } + if ((err = mp_div_2d(&u, k, &u, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_V); + if ((err = mp_div_2d(&v, k, &v, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_V); } /* divide any remaining factors of two out */ if (u_lsb != k) { - if ((err = mp_div_2d(&u, u_lsb - k, &u, NULL)) != MP_OKAY) { - goto LBL_V; - } + if ((err = mp_div_2d(&u, u_lsb - k, &u, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_V); } if (v_lsb != k) { - if ((err = mp_div_2d(&v, v_lsb - k, &v, NULL)) != MP_OKAY) { - goto LBL_V; - } + if ((err = mp_div_2d(&v, 
v_lsb - k, &v, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_V); } while (!mp_iszero(&v)) { @@ -65,28 +54,22 @@ mp_err mp_gcd(const mp_int *a, const mp_int *b, mp_int *c) /* swap u and v to make sure v is >= u */ mp_exch(&u, &v); } - /* subtract smallest from largest */ - if ((err = s_mp_sub(&v, &u, &v)) != MP_OKAY) { - goto LBL_V; - } - + if ((err = s_mp_sub(&v, &u, &v)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_V); /* Divide out all factors of two */ - if ((err = mp_div_2d(&v, mp_cnt_lsb(&v), &v, NULL)) != MP_OKAY) { - goto LBL_V; - } + if ((err = mp_div_2d(&v, mp_cnt_lsb(&v), &v, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_V); } /* multiply by 2**k which we divided out at the beginning */ - if ((err = mp_mul_2d(&u, k, c)) != MP_OKAY) { - goto LBL_V; - } + if ((err = mp_mul_2d(&u, k, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_V); c->sign = MP_ZPOS; - err = MP_OKAY; -LBL_V: + + +LTM_ERR_V: mp_clear(&u); -LBL_U: +LTM_ERR_U: mp_clear(&v); +LTM_ERR: return err; } #endif diff --git a/mp_grow.c b/mp_grow.c index 551d1a3bc..90b40682e 100644 --- a/mp_grow.c +++ b/mp_grow.c @@ -6,8 +6,11 @@ /* grow as required */ mp_err mp_grow(mp_int *a, int size) { + mp_err err = MP_OKAY; + if (size < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* if the alloc size is smaller alloc more ram */ @@ -15,7 +18,8 @@ mp_err mp_grow(mp_int *a, int size) mp_digit *dp; if (size > MP_MAX_DIGIT_COUNT) { - return MP_OVF; + err = MP_OVF; + MP_TRACE_ERROR(err, LTM_ERR); } /* reallocate the array a->dp @@ -29,7 +33,8 @@ mp_err mp_grow(mp_int *a, int size) (size_t)size * sizeof(mp_digit)); if (dp == NULL) { /* reallocation failed but "a" is still valid [can be freed] */ - return MP_MEM; + err = MP_MEM; + MP_TRACE_ERROR(err, LTM_ERR); } /* reallocation succeeded so set a->dp */ @@ -39,6 +44,8 @@ mp_err mp_grow(mp_int *a, int size) s_mp_zero_digs(a->dp + a->alloc, size - a->alloc); a->alloc = size; } - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_init.c b/mp_init.c index af1674481..cf8b1fb19 100644 --- a/mp_init.c +++ b/mp_init.c @@ -6,10 +6,13 @@ /* init a new mp_int */ mp_err mp_init(mp_int *a) { + mp_err err = MP_OKAY; + /* allocate memory required and clear it */ a->dp = (mp_digit *) MP_CALLOC((size_t)MP_DEFAULT_DIGIT_COUNT, sizeof(mp_digit)); if (a->dp == NULL) { - return MP_MEM; + err = MP_MEM; + MP_TRACE_ERROR(err, LTM_ERR); } /* set the used to zero, allocated digits to the default precision @@ -18,6 +21,7 @@ mp_err mp_init(mp_int *a) a->alloc = MP_DEFAULT_DIGIT_COUNT; a->sign = MP_ZPOS; - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_init_copy.c b/mp_init_copy.c index 4d0773bad..4604cce2d 100644 --- a/mp_init_copy.c +++ b/mp_init_copy.c @@ -6,16 +6,16 @@ /* creates "a" then copies b into it */ mp_err mp_init_copy(mp_int *a, const mp_int *b) { - mp_err err; + mp_err err = MP_OKAY; - if ((err = mp_init_size(a, b->used)) != MP_OKAY) { - return err; - } + if ((err = mp_init_size(a, b->used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if ((err = mp_copy(b, a)) != MP_OKAY) { mp_clear(a); + MP_TRACE_ERROR(err, LTM_ERR); } +LTM_ERR: return err; } #endif diff --git a/mp_init_multi.c b/mp_init_multi.c index 908b4df45..5f672e0c8 100644 --- a/mp_init_multi.c +++ b/mp_init_multi.c @@ -29,11 +29,13 @@ mp_err mp_init_multi(mp_int *mp, ...) 
cur_arg = va_arg(clean_args, mp_int *); } va_end(clean_args); - break; + MP_TRACE_ERROR(err, LTM_ERR); } n++; cur_arg = va_arg(args, mp_int *); } + +LTM_ERR: va_end(args); return err; } diff --git a/mp_init_set.c b/mp_init_set.c index e1f2ee94d..6669a6f44 100644 --- a/mp_init_set.c +++ b/mp_init_set.c @@ -6,11 +6,10 @@ /* initialize and set a digit */ mp_err mp_init_set(mp_int *a, mp_digit b) { - mp_err err; - if ((err = mp_init(a)) != MP_OKAY) { - return err; - } + mp_err err = MP_OKAY; + if ((err = mp_init(a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); mp_set(a, b); +LTM_ERR: return err; } #endif diff --git a/mp_init_size.c b/mp_init_size.c index 87a4256e8..36b059543 100644 --- a/mp_init_size.c +++ b/mp_init_size.c @@ -6,20 +6,24 @@ /* init an mp_init for a given size */ mp_err mp_init_size(mp_int *a, int size) { + mp_err err = MP_OKAY; if (size < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } size = MP_MAX(MP_MIN_DIGIT_COUNT, size); if (size > MP_MAX_DIGIT_COUNT) { - return MP_OVF; + err = MP_OVF; + MP_TRACE_ERROR(err, LTM_ERR); } /* alloc mem */ a->dp = (mp_digit *) MP_CALLOC((size_t)size, sizeof(mp_digit)); if (a->dp == NULL) { - return MP_MEM; + err = MP_MEM; + MP_TRACE_ERROR(err, LTM_ERR); } /* set the members */ @@ -27,6 +31,7 @@ mp_err mp_init_size(mp_int *a, int size) a->alloc = size; a->sign = MP_ZPOS; - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_invmod.c b/mp_invmod.c index 2494acbf6..b4fd9108b 100644 --- a/mp_invmod.c +++ b/mp_invmod.c @@ -6,6 +6,7 @@ /* hac 14.61, pp608 */ mp_err mp_invmod(const mp_int *a, const mp_int *b, mp_int *c) { + mp_err err = MP_OKAY; /* for all n in N and n > 0, n = 0 mod 1 */ if (!mp_isneg(a) && mp_cmp_d(b, 1uL) == MP_EQ) { mp_zero(c); @@ -14,16 +15,25 @@ mp_err mp_invmod(const mp_int *a, const mp_int *b, mp_int *c) /* b cannot be negative and has to be >1 */ if (mp_isneg(b) || (mp_cmp_d(b, 1uL) != MP_GT)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* if the modulus is odd we can use a faster routine instead */ if (MP_HAS(S_MP_INVMOD_ODD) && mp_isodd(b)) { - return s_mp_invmod_odd(a, b, c); + if ((err = s_mp_invmod_odd(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } - return MP_HAS(S_MP_INVMOD) - ? 
s_mp_invmod(a, b, c) - : MP_VAL; + if (MP_HAS(S_MP_INVMOD)) { + if ((err = s_mp_invmod(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + } else { + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); + } + +LTM_ERR: + return err; + } #endif diff --git a/mp_is_square.c b/mp_is_square.c index d2856e255..6ebc6dbc8 100644 --- a/mp_is_square.c +++ b/mp_is_square.c @@ -28,7 +28,7 @@ static const char rem_105[105] = { /* Store non-zero to ret if arg is square, and zero if not */ mp_err mp_is_square(const mp_int *arg, bool *ret) { - mp_err err; + mp_err err = MP_OKAY; mp_digit c; mp_int t; uint32_t r; @@ -37,7 +37,8 @@ mp_err mp_is_square(const mp_int *arg, bool *ret) *ret = false; if (mp_isneg(arg)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } if (mp_iszero(arg)) { @@ -51,44 +52,36 @@ mp_err mp_is_square(const mp_int *arg, bool *ret) } /* Next check mod 105 (3*5*7) */ - if ((err = mp_mod_d(arg, 105uL, &c)) != MP_OKAY) { - return err; - } + if ((err = mp_mod_d(arg, 105uL, &c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if (rem_105[c] == (char)1) { return MP_OKAY; } - if ((err = mp_init_u32(&t, 11u*13u*17u*19u*23u*29u*31u)) != MP_OKAY) { - return err; - } - if ((err = mp_mod(arg, &t, &t)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_init_u32(&t, 11u*13u*17u*19u*23u*29u*31u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_mod(arg, &t, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); r = mp_get_u32(&t); /* Check for other prime modules, note it's not an ERROR but we must - * free "t" so the easiest way is to goto LBL_ERR. We know that err + * free "t" so the easiest way is to goto LTM_ERR. We know that err * is already equal to MP_OKAY from the mp_mod call */ - if (((1uL<<(r%11uL)) & 0x5C4uL) != 0uL) goto LBL_ERR; - if (((1uL<<(r%13uL)) & 0x9E4uL) != 0uL) goto LBL_ERR; - if (((1uL<<(r%17uL)) & 0x5CE8uL) != 0uL) goto LBL_ERR; - if (((1uL<<(r%19uL)) & 0x4F50CuL) != 0uL) goto LBL_ERR; - if (((1uL<<(r%23uL)) & 0x7ACCA0uL) != 0uL) goto LBL_ERR; - if (((1uL<<(r%29uL)) & 0xC2EDD0CuL) != 0uL) goto LBL_ERR; - if (((1uL<<(r%31uL)) & 0x6DE2B848uL) != 0uL) goto LBL_ERR; + if (((1uL<<(r%11uL)) & 0x5C4uL) != 0uL) goto LTM_ERR_1; + if (((1uL<<(r%13uL)) & 0x9E4uL) != 0uL) goto LTM_ERR_1; + if (((1uL<<(r%17uL)) & 0x5CE8uL) != 0uL) goto LTM_ERR_1; + if (((1uL<<(r%19uL)) & 0x4F50CuL) != 0uL) goto LTM_ERR_1; + if (((1uL<<(r%23uL)) & 0x7ACCA0uL) != 0uL) goto LTM_ERR_1; + if (((1uL<<(r%29uL)) & 0xC2EDD0CuL) != 0uL) goto LTM_ERR_1; + if (((1uL<<(r%31uL)) & 0x6DE2B848uL) != 0uL) goto LTM_ERR_1; /* Final check - is sqr(sqrt(arg)) == arg ? 
*/ - if ((err = mp_sqrt(arg, &t)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_sqr(&t, &t)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_sqrt(arg, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_sqr(&t, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); *ret = (mp_cmp_mag(&t, arg) == MP_EQ); -LBL_ERR: + +LTM_ERR_1: mp_clear(&t); +LTM_ERR: return err; } #endif diff --git a/mp_kronecker.c b/mp_kronecker.c index e6bedc891..1fde13c25 100644 --- a/mp_kronecker.c +++ b/mp_kronecker.c @@ -20,7 +20,7 @@ mp_err mp_kronecker(const mp_int *a, const mp_int *p, int *c) { mp_int a1, p1, r; - mp_err err; + mp_err err = MP_OKAY; int v, k; static const char table[] = {0, 1, 0, -1, 0, -1, 0, 1}; @@ -39,17 +39,11 @@ mp_err mp_kronecker(const mp_int *a, const mp_int *p, int *c) return MP_OKAY; } - if ((err = mp_init_copy(&a1, a)) != MP_OKAY) { - return err; - } - if ((err = mp_init_copy(&p1, p)) != MP_OKAY) { - goto LBL_KRON_0; - } + if ((err = mp_init_copy(&a1, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_init_copy(&p1, p)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_KRON_0); v = mp_cnt_lsb(&p1); - if ((err = mp_div_2d(&p1, v, &p1, NULL)) != MP_OKAY) { - goto LBL_KRON_1; - } + if ((err = mp_div_2d(&p1, v, &p1, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_KRON_1); if ((v & 1) == 0) { k = 1; @@ -64,25 +58,21 @@ mp_err mp_kronecker(const mp_int *a, const mp_int *p, int *c) } } - if ((err = mp_init(&r)) != MP_OKAY) { - goto LBL_KRON_1; - } + if ((err = mp_init(&r)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_KRON_1); for (;;) { if (mp_iszero(&a1)) { if (mp_cmp_d(&p1, 1uL) == MP_EQ) { *c = k; - goto LBL_KRON; + goto LTM_ERR_KRON; } else { *c = 0; - goto LBL_KRON; + goto LTM_ERR_KRON; } } v = mp_cnt_lsb(&a1); - if ((err = mp_div_2d(&a1, v, &a1, NULL)) != MP_OKAY) { - goto LBL_KRON; - } + if ((err = mp_div_2d(&a1, v, &a1, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_KRON); if ((v & 1) == 1) { k = k * table[p1.dp[0] & 7u]; @@ -104,25 +94,19 @@ mp_err mp_kronecker(const mp_int *a, const mp_int *p, int *c) } } - if ((err = mp_copy(&a1, &r)) != MP_OKAY) { - goto LBL_KRON; - } + if ((err = mp_copy(&a1, &r)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_KRON); r.sign = MP_ZPOS; - if ((err = mp_mod(&p1, &r, &a1)) != MP_OKAY) { - goto LBL_KRON; - } - if ((err = mp_copy(&r, &p1)) != MP_OKAY) { - goto LBL_KRON; - } + if ((err = mp_mod(&p1, &r, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_KRON); + if ((err = mp_copy(&r, &p1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_KRON); } -LBL_KRON: +LTM_ERR_KRON: mp_clear(&r); -LBL_KRON_1: +LTM_ERR_KRON_1: mp_clear(&p1); -LBL_KRON_0: +LTM_ERR_KRON_0: mp_clear(&a1); - +LTM_ERR: return err; } diff --git a/mp_lcm.c b/mp_lcm.c index f2044f0e5..e165e220a 100644 --- a/mp_lcm.c +++ b/mp_lcm.c @@ -6,39 +6,32 @@ /* computes least common multiple as |a*b|/(a, b) */ mp_err mp_lcm(const mp_int *a, const mp_int *b, mp_int *c) { - mp_err err; + mp_err err = MP_OKAY; mp_int t1, t2; - if ((err = mp_init_multi(&t1, &t2, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&t1, &t2, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* t1 = get the GCD of the two inputs */ - if ((err = mp_gcd(a, b, &t1)) != MP_OKAY) { - goto LBL_T; - } + if ((err = mp_gcd(a, b, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_0); /* divide the smallest by the GCD */ if (mp_cmp_mag(a, b) == MP_LT) { /* store quotient in t2 such that t2 * b is the LCM */ - if ((err = mp_div(a, &t1, &t2, NULL)) != MP_OKAY) { - goto LBL_T; - } - err = mp_mul(b, &t2, c); + if ((err = mp_div(a, &t1, &t2, 
NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_0); + if ((err = mp_mul(b, &t2, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_0); } else { /* store quotient in t2 such that t2 * a is the LCM */ - if ((err = mp_div(b, &t1, &t2, NULL)) != MP_OKAY) { - goto LBL_T; - } - err = mp_mul(a, &t2, c); + if ((err = mp_div(b, &t1, &t2, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_0); + if ((err = mp_mul(a, &t2, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_0); } /* fix the sign to positive */ c->sign = MP_ZPOS; -LBL_T: +LTM_ERR_0: mp_clear_multi(&t1, &t2, NULL); +LTM_ERR: return err; } #endif diff --git a/mp_log.c b/mp_log.c index 0d5d893ad..bf5b4bf18 100644 --- a/mp_log.c +++ b/mp_log.c @@ -8,11 +8,11 @@ static mp_err s_approx_log_d(const mp_int *a, const mp_int *b, int *lb) { mp_word La, Lb; - mp_err err; + mp_err err = MP_OKAY; /* Approximation of the individual logarithms with low precision */ - if ((err = s_mp_fp_log_d(a, &La)) != MP_OKAY) goto LTM_ERR; - if ((err = s_mp_fp_log_d(b, &Lb)) != MP_OKAY) goto LTM_ERR; + if ((err = s_mp_fp_log_d(a, &La)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = s_mp_fp_log_d(b, &Lb)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* Approximation of log_b(a) with low precision. */ *lb = (int)(((La - (Lb + 1)/2) / Lb) + 1); @@ -26,25 +26,24 @@ static mp_err s_approx_log_d(const mp_int *a, const mp_int *b, int *lb) static mp_err s_approx_log(const mp_int *a, const mp_int *b, int *lb) { mp_int La, Lb, t; - mp_err err; + mp_err err = MP_OKAY; - if ((err = mp_init_multi(&La, &Lb, &t, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&La, &Lb, &t, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); - if ((err = s_mp_fp_log(a, &La)) != MP_OKAY) goto LTM_ERR; - if ((err = s_mp_fp_log(b, &Lb)) != MP_OKAY) goto LTM_ERR; + if ((err = s_mp_fp_log(a, &La)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = s_mp_fp_log(b, &Lb)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_add_d(&Lb, 1u, &t)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_div_2(&t, &t)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_sub(&La, &t, &t)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_div(&t, &Lb, &t, NULL)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_add_d(&t, 1u, &t)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_add_d(&Lb, 1u, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2(&t, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_sub(&La, &t, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div(&t, &Lb, &t, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add_d(&t, 1u, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); *lb = mp_get_i32(&t); - err = MP_OKAY; -LTM_ERR: + +LTM_ERR_1: mp_clear_multi(&t, &Lb, &La, NULL); +LTM_ERR: return err; } @@ -52,11 +51,12 @@ mp_err mp_log(const mp_int *a, const mp_int *b, int *lb) { mp_int bn; int n, fla, flb; - mp_err err; + mp_err err = MP_OKAY; mp_ord cmp; if (mp_isneg(a) || mp_iszero(a) || (mp_cmp_d(b, 2u) == MP_LT)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } if (MP_IS_POWER_OF_TWO(b)) { @@ -83,17 +83,12 @@ mp_err mp_log(const mp_int *a, const mp_int *b, int *lb) } if (MP_HAS(S_MP_WORD_TOO_SMALL)) { - err = s_approx_log(a, b, &n); + if ((err = s_approx_log(a, b, &n)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else { - err = s_approx_log_d(a, b, &n); - } - if (err != MP_OKAY) { - return err; + if ((err = s_approx_log_d(a, b, &n)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } - if ((err = mp_init(&bn)) != MP_OKAY) { - return err; - } + if ((err = 
mp_init(&bn)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* Check result. Result is wrong by 2(two) at most. */ if ((err = mp_expt_n(b, n, &bn)) != MP_OKAY) { @@ -101,9 +96,9 @@ mp_err mp_log(const mp_int *a, const mp_int *b, int *lb) if (err == MP_OVF) { n--; /* But only one */ - if ((err = mp_expt_n(b, n, &bn)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_expt_n(b, n, &bn)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_2); } else { - goto LTM_ERR; + MP_TRACE_ERROR(err, LTM_ERR_2); } } @@ -112,7 +107,7 @@ mp_err mp_log(const mp_int *a, const mp_int *b, int *lb) /* The rare case of a perfect power makes a perfect shortcut, too. */ if (cmp == MP_EQ) { *lb = n; - goto LTM_OUT; + goto LTM_ERR_1; } /* We have to make at least one multiplication because it could still be a perfect power. */ @@ -121,17 +116,12 @@ mp_err mp_log(const mp_int *a, const mp_int *b, int *lb) /* Full big-integer operations are to be avoided if possible */ if (b->used == 1) { if ((err = mp_mul_d(&bn, b->dp[0], &bn)) != MP_OKAY) { - if (err == MP_OVF) { - goto LTM_OUT; - } - goto LTM_ERR; + /* We can ignore the overflow because the input was in size in the first place */ + if (err == MP_OVF) MP_TRACE_ERROR(err, LTM_ERR_1); } } else { if ((err = mp_mul(&bn, b, &bn)) != MP_OKAY) { - if (err == MP_OVF) { - goto LTM_OUT; - } - goto LTM_ERR; + if (err == MP_OVF) MP_TRACE_ERROR(err, LTM_ERR_1); } } n++; @@ -140,28 +130,29 @@ mp_err mp_log(const mp_int *a, const mp_int *b, int *lb) if (cmp == MP_GT) { n--; } - goto LTM_OUT; + goto LTM_ERR_1; } /* But it can overestimate, too, for example if "a" is closely below some "b^k" */ if (cmp == MP_GT) { do { if (b->used == 1) { - /* These are cheaper exact divisions, but that function is not available in LTM */ - if ((err = mp_div_d(&bn, b->dp[0], &bn, NULL)) != MP_OKAY) goto LTM_ERR; - + /* There are cheaper exact divisions, but that function is not available in LTM */ + if ((err = mp_div_d(&bn, b->dp[0], &bn, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_2); } else { - if ((err = mp_div(&bn, b, &bn, NULL)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_div(&bn, b, &bn, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_2); } n--; } while ((cmp = mp_cmp(&bn, a)) == MP_GT); } -LTM_OUT: +LTM_ERR_1: *lb = n; + /* Reset the OVF error from the correction loops above */ err = MP_OKAY; -LTM_ERR: +LTM_ERR_2: mp_clear(&bn); +LTM_ERR: return err; } diff --git a/mp_log_n.c b/mp_log_n.c index 364e28a4b..61840418f 100644 --- a/mp_log_n.c +++ b/mp_log_n.c @@ -6,10 +6,10 @@ mp_err mp_log_n(const mp_int *a, int base, int *c) { mp_int b; - mp_err err; + mp_err err = MP_OKAY; - if ((err = mp_init_i32(&b, base)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_log(a, &b, c)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_init_i32(&b, base)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_log(a, &b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); LTM_ERR: mp_clear(&b); diff --git a/mp_lshd.c b/mp_lshd.c index 90014e480..5833a6b83 100644 --- a/mp_lshd.c +++ b/mp_lshd.c @@ -6,7 +6,7 @@ /* shift left a certain amount of digits */ mp_err mp_lshd(mp_int *a, int b) { - mp_err err; + mp_err err = MP_OKAY; int x; /* if its less than zero return */ @@ -19,9 +19,7 @@ mp_err mp_lshd(mp_int *a, int b) } /* grow to fit the new digits */ - if ((err = mp_grow(a, a->used + b)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(a, a->used + b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* increment the used by the shift amount then copy upwards */ a->used += b; @@ -37,6 +35,7 @@ mp_err mp_lshd(mp_int *a, int b) /* zero the lower digits */ 
s_mp_zero_digs(a->dp, b); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_mod_2d.c b/mp_mod_2d.c index 82c64f05f..d7f2d8472 100644 --- a/mp_mod_2d.c +++ b/mp_mod_2d.c @@ -7,10 +7,11 @@ mp_err mp_mod_2d(const mp_int *a, int b, mp_int *c) { int x; - mp_err err; + mp_err err = MP_OKAY; if (b < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } if (b == 0) { @@ -20,21 +21,21 @@ mp_err mp_mod_2d(const mp_int *a, int b, mp_int *c) /* if the modulus is larger than the value than return */ if (b >= (a->used * MP_DIGIT_BIT)) { - return mp_copy(a, c); - } - - if ((err = mp_copy(a, c)) != MP_OKAY) { + if ((err = mp_copy(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); return err; } + if ((err = mp_copy(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + /* zero digits above the last digit of the modulus */ x = (b / MP_DIGIT_BIT) + (((b % MP_DIGIT_BIT) == 0) ? 0 : 1); s_mp_zero_digs(c->dp + x, c->used - x); /* clear the digit that is not completely outside/inside the modulus */ - c->dp[b / MP_DIGIT_BIT] &= - ((mp_digit)1 << (mp_digit)(b % MP_DIGIT_BIT)) - (mp_digit)1; + c->dp[b / MP_DIGIT_BIT] &= ((mp_digit)1 << (mp_digit)(b % MP_DIGIT_BIT)) - (mp_digit)1; mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_montgomery_calc_normalization.c b/mp_montgomery_calc_normalization.c index bbb3adbc1..876f5c961 100644 --- a/mp_montgomery_calc_normalization.c +++ b/mp_montgomery_calc_normalization.c @@ -12,15 +12,14 @@ mp_err mp_montgomery_calc_normalization(mp_int *a, const mp_int *b) { int x, bits; - mp_err err; + mp_err err = MP_OKAY; /* how many bits of last digit does b use */ bits = mp_count_bits(b) % MP_DIGIT_BIT; if (b->used > 1) { - if ((err = mp_2expt(a, ((b->used - 1) * MP_DIGIT_BIT) + bits - 1)) != MP_OKAY) { - return err; - } + if ((err = mp_2expt(a, ((b->used - 1) * MP_DIGIT_BIT) + bits - 1)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); } else { mp_set(a, 1uL); bits = 1; @@ -28,16 +27,13 @@ mp_err mp_montgomery_calc_normalization(mp_int *a, const mp_int *b) /* now compute C = A * B mod b */ for (x = bits - 1; x < (int)MP_DIGIT_BIT; x++) { - if ((err = mp_mul_2(a, a)) != MP_OKAY) { - return err; - } + if ((err = mp_mul_2(a, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if (mp_cmp_mag(a, b) != MP_LT) { - if ((err = s_mp_sub(a, b, a)) != MP_OKAY) { - return err; - } + if ((err = s_mp_sub(a, b, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } } - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_montgomery_reduce.c b/mp_montgomery_reduce.c index dbf45d3cf..94f40e6e9 100644 --- a/mp_montgomery_reduce.c +++ b/mp_montgomery_reduce.c @@ -6,7 +6,7 @@ /* computes xR**-1 == x (mod N) via Montgomery Reduction */ mp_err mp_montgomery_reduce(mp_int *x, const mp_int *n, mp_digit rho) { - mp_err err; + mp_err err = MP_OKAY; int ix, digs; /* can the fast reduction [comba] method be used? 
@@ -19,13 +19,12 @@ mp_err mp_montgomery_reduce(mp_int *x, const mp_int *n, mp_digit rho) if ((digs < MP_WARRAY) && (x->used <= MP_WARRAY) && (n->used < MP_MAX_COMBA)) { - return s_mp_montgomery_reduce_comba(x, n, rho); + if ((err = s_mp_montgomery_reduce_comba(x, n, rho)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* grow the input as required */ - if ((err = mp_grow(x, digs)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(x, digs)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); x->used = digs; for (ix = 0; ix < n->used; ix++) { @@ -81,9 +80,10 @@ mp_err mp_montgomery_reduce(mp_int *x, const mp_int *n, mp_digit rho) /* if x >= n then x = x - n */ if (mp_cmp_mag(x, n) != MP_LT) { - return s_mp_sub(x, n, x); + if ((err = s_mp_sub(x, n, x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_montgomery_setup.c b/mp_montgomery_setup.c index de57dc342..80d807730 100644 --- a/mp_montgomery_setup.c +++ b/mp_montgomery_setup.c @@ -7,6 +7,7 @@ mp_err mp_montgomery_setup(const mp_int *n, mp_digit *rho) { mp_digit x, b; + mp_err err = MP_OKAY; /* fast inversion mod 2**k * @@ -19,7 +20,8 @@ mp_err mp_montgomery_setup(const mp_int *n, mp_digit *rho) b = n->dp[0]; if ((b & 1u) == 0u) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } x = (((b + 2u) & 4u) << 1) + b; /* here x*a==1 mod 2**4 */ @@ -35,6 +37,7 @@ mp_err mp_montgomery_setup(const mp_int *n, mp_digit *rho) /* rho = -1/m mod b */ *rho = (mp_digit)(((mp_word)1 << (mp_word)MP_DIGIT_BIT) - x) & MP_MASK; - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_mul.c b/mp_mul.c index 81807406e..26a4e629d 100644 --- a/mp_mul.c +++ b/mp_mul.c @@ -6,7 +6,7 @@ /* high level multiplication (handles sign) */ mp_err mp_mul(const mp_int *a, const mp_int *b, mp_int *c) { - mp_err err; + mp_err err = MP_OKAY; int min = MP_MIN(a->used, b->used), max = MP_MAX(a->used, b->used), digs = a->used + b->used + 1; @@ -15,19 +15,19 @@ mp_err mp_mul(const mp_int *a, const mp_int *b, mp_int *c) if ((a == b) && MP_HAS(S_MP_SQR_TOOM) && /* use Toom-Cook? */ (a->used >= MP_SQR_TOOM_CUTOFF)) { - err = s_mp_sqr_toom(a, c); + if ((err = s_mp_sqr_toom(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if ((a == b) && MP_HAS(S_MP_SQR_KARATSUBA) && /* Karatsuba? */ (a->used >= MP_SQR_KARATSUBA_CUTOFF)) { - err = s_mp_sqr_karatsuba(a, c); + if ((err = s_mp_sqr_karatsuba(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if ((a == b) && MP_HAS(S_MP_SQR_COMBA) && /* can we use the fast comba multiplier? */ (((a->used * 2) + 1) < MP_WARRAY) && (a->used <= MP_MAX_COMBA)) { - err = s_mp_sqr_comba(a, c); + if ((err = s_mp_sqr_comba(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if ((a == b) && MP_HAS(S_MP_SQR)) { - err = s_mp_sqr(a, c); + if ((err = s_mp_sqr(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if (MP_HAS(S_MP_MUL_BALANCE) && /* Check sizes. The smaller one needs to be larger than the Karatsuba cut-off. * The bigger one needs to be at least about one MP_MUL_KARATSUBA_CUTOFF bigger @@ -40,13 +40,13 @@ mp_err mp_mul(const mp_int *a, const mp_int *b, mp_int *c) ((max / 2) >= MP_MUL_KARATSUBA_CUTOFF) && /* Not much effect was observed below a ratio of 1:2, but again: YMMV. 
*/ (max >= (2 * min))) { - err = s_mp_mul_balance(a,b,c); + if ((err = s_mp_mul_balance(a,b,c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if (MP_HAS(S_MP_MUL_TOOM) && (min >= MP_MUL_TOOM_CUTOFF)) { - err = s_mp_mul_toom(a, b, c); + if ((err = s_mp_mul_toom(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if (MP_HAS(S_MP_MUL_KARATSUBA) && (min >= MP_MUL_KARATSUBA_CUTOFF)) { - err = s_mp_mul_karatsuba(a, b, c); + if ((err = s_mp_mul_karatsuba(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if (MP_HAS(S_MP_MUL_COMBA) && /* can we use the fast multiplier? * @@ -56,13 +56,15 @@ mp_err mp_mul(const mp_int *a, const mp_int *b, mp_int *c) */ (digs < MP_WARRAY) && (min <= MP_MAX_COMBA)) { - err = s_mp_mul_comba(a, b, c, digs); + if ((err = s_mp_mul_comba(a, b, c, digs)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else if (MP_HAS(S_MP_MUL)) { - err = s_mp_mul(a, b, c, digs); + if ((err = s_mp_mul(a, b, c, digs)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } else { err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } c->sign = ((c->used > 0) && neg) ? MP_NEG : MP_ZPOS; +LTM_ERR: return err; } #endif diff --git a/mp_mul_2.c b/mp_mul_2.c index 459fbd29d..19276d3e7 100644 --- a/mp_mul_2.c +++ b/mp_mul_2.c @@ -6,14 +6,12 @@ /* b = a*2 */ mp_err mp_mul_2(const mp_int *a, mp_int *b) { - mp_err err; + mp_err err = MP_OKAY; int x, oldused; mp_digit r; /* grow to accommodate result */ - if ((err = mp_grow(b, a->used + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(b, a->used + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); oldused = b->used; b->used = a->used; @@ -48,6 +46,8 @@ mp_err mp_mul_2(const mp_int *a, mp_int *b) s_mp_zero_digs(b->dp + b->used, oldused - b->used); b->sign = a->sign; - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_mul_2d.c b/mp_mul_2d.c index e4581375b..e6f5dd552 100644 --- a/mp_mul_2d.c +++ b/mp_mul_2d.c @@ -6,25 +6,19 @@ /* shift left by a certain bit count */ mp_err mp_mul_2d(const mp_int *a, int b, mp_int *c) { - mp_err err; + mp_err err = MP_OKAY; if (b < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } - if ((err = mp_copy(a, c)) != MP_OKAY) { - return err; - } - - if ((err = mp_grow(c, c->used + (b / MP_DIGIT_BIT) + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_copy(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_grow(c, c->used + (b / MP_DIGIT_BIT) + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* shift by as many digits in the bit count */ if (b >= MP_DIGIT_BIT) { - if ((err = mp_lshd(c, b / MP_DIGIT_BIT)) != MP_OKAY) { - return err; - } + if ((err = mp_lshd(c, b / MP_DIGIT_BIT)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } /* shift any bit count < MP_DIGIT_BIT */ @@ -58,6 +52,8 @@ mp_err mp_mul_2d(const mp_int *a, int b, mp_int *c) } } mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_mul_d.c b/mp_mul_d.c index 258505543..5c289c2a0 100644 --- a/mp_mul_d.c +++ b/mp_mul_d.c @@ -7,29 +7,30 @@ mp_err mp_mul_d(const mp_int *a, mp_digit b, mp_int *c) { mp_digit u; - mp_err err; + mp_err err = MP_OKAY; int ix, oldused; if (b == 1u) { - return mp_copy(a, c); + if ((err = mp_copy(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* power of two ? 
*/ if (MP_HAS(MP_MUL_2) && (b == 2u)) { - return mp_mul_2(a, c); + if ((err = mp_mul_2(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } if (MP_HAS(MP_MUL_2D) && MP_IS_2EXPT(b)) { ix = 1; while ((ix < MP_DIGIT_BIT) && (b != (((mp_digit)1)<used + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, a->used + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* get the original destinations used count */ oldused = c->used; @@ -63,6 +64,7 @@ mp_err mp_mul_d(const mp_int *a, mp_digit b, mp_int *c) mp_clamp(c); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_mulmod.c b/mp_mulmod.c index e158693b9..fcd87c811 100644 --- a/mp_mulmod.c +++ b/mp_mulmod.c @@ -6,10 +6,10 @@ /* d = a * b (mod c) */ mp_err mp_mulmod(const mp_int *a, const mp_int *b, const mp_int *c, mp_int *d) { - mp_err err; - if ((err = mp_mul(a, b, d)) != MP_OKAY) { - return err; - } - return mp_mod(d, c, d); + mp_err err = MP_OKAY; + if ((err = mp_mul(a, b, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_mod(d, c, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); +LTM_ERR: + return err; } #endif diff --git a/mp_neg.c b/mp_neg.c index b445cd409..2dbfd552c 100644 --- a/mp_neg.c +++ b/mp_neg.c @@ -6,13 +6,12 @@ /* b = -a */ mp_err mp_neg(const mp_int *a, mp_int *b) { - mp_err err; - if ((err = mp_copy(a, b)) != MP_OKAY) { - return err; - } + mp_err err = MP_OKAY; + if ((err = mp_copy(a, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); b->sign = ((!mp_iszero(b) && !mp_isneg(b)) ? MP_NEG : MP_ZPOS); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_or.c b/mp_or.c index 15958524e..e2da7ed0e 100644 --- a/mp_or.c +++ b/mp_or.c @@ -7,13 +7,11 @@ mp_err mp_or(const mp_int *a, const mp_int *b, mp_int *c) { int used = MP_MAX(a->used, b->used) + 1, i; - mp_err err; + mp_err err = MP_OKAY; mp_digit ac = 1, bc = 1, cc = 1; bool neg = (mp_isneg(a) || mp_isneg(b)); - if ((err = mp_grow(c, used)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); for (i = 0; i < used; i++) { mp_digit x, y; @@ -49,6 +47,8 @@ mp_err mp_or(const mp_int *a, const mp_int *b, mp_int *c) c->used = used; c->sign = (neg ? MP_NEG : MP_ZPOS); mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_pack.c b/mp_pack.c index 447f1fdf9..6845dc12e 100644 --- a/mp_pack.c +++ b/mp_pack.c @@ -9,7 +9,7 @@ mp_err mp_pack(void *rop, size_t maxcount, size_t *written, mp_order order, size_t size, mp_endian endian, size_t nails, const mp_int *op) { - mp_err err; + mp_err err = MP_OKAY; size_t odd_nails, nail_bytes, i, j, count; uint8_t odd_nail_mask; @@ -18,12 +18,11 @@ mp_err mp_pack(void *rop, size_t maxcount, size_t *written, mp_order order, size count = mp_pack_count(op, nails, size); if (count > maxcount) { - return MP_BUF; + err = MP_BUF; + MP_TRACE_ERROR(err, LTM_ERR); } - if ((err = mp_init_copy(&t, op)) != MP_OKAY) { - return err; - } + if ((err = mp_init_copy(&t, op)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if (endian == MP_NATIVE_ENDIAN) { MP_GET_ENDIANNESS(endian); @@ -49,20 +48,18 @@ mp_err mp_pack(void *rop, size_t maxcount, size_t *written, mp_order order, size *byte = (uint8_t)((j == ((size - nail_bytes) - 1u)) ? (t.dp[0] & odd_nail_mask) : (t.dp[0] & 0xFFuL)); - if ((err = mp_div_2d(&t, (j == ((size - nail_bytes) - 1u)) ? (int)(8u - odd_nails) : 8, &t, NULL)) != MP_OKAY) { - goto LBL_ERR; - } - + if ((err = mp_div_2d(&t, (j == ((size - nail_bytes) - 1u)) ? 
(int)(8u - odd_nails) : 8, &t, NULL)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_1); } } if (written != NULL) { *written = count; } - err = MP_OKAY; -LBL_ERR: +LTM_ERR_1: mp_clear(&t); +LTM_ERR: return err; } diff --git a/mp_prime_fermat.c b/mp_prime_fermat.c index ac8116fef..95455722a 100644 --- a/mp_prime_fermat.c +++ b/mp_prime_fermat.c @@ -14,28 +14,26 @@ mp_err mp_prime_fermat(const mp_int *a, const mp_int *b, bool *result) { mp_int t; - mp_err err; + mp_err err = MP_OKAY; /* ensure b > 1 */ if (mp_cmp_d(b, 1uL) != MP_GT) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* init t */ - if ((err = mp_init(&t)) != MP_OKAY) { - return err; - } + if ((err = mp_init(&t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* compute t = b**a mod a */ - if ((err = mp_exptmod(b, a, a, &t)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_exptmod(b, a, a, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* is it equal to b? */ *result = mp_cmp(&t, b) == MP_EQ; -LBL_ERR: +LTM_ERR_1: mp_clear(&t); +LTM_ERR: return err; } #endif diff --git a/mp_prime_frobenius_underwood.c b/mp_prime_frobenius_underwood.c index 62d3476a9..f25832c4f 100644 --- a/mp_prime_frobenius_underwood.c +++ b/mp_prime_frobenius_underwood.c @@ -24,11 +24,10 @@ mp_err mp_prime_frobenius_underwood(const mp_int *N, bool *result) { mp_int T1z, T2z, Np1z, sz, tz; int a, ap2, i; - mp_err err; + mp_err err = MP_OKAY; - if ((err = mp_init_multi(&T1z, &T2z, &Np1z, &sz, &tz, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&T1z, &T2z, &Np1z, &sz, &tz, NULL)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); for (a = 0; a < LTM_FROBENIUS_UNDERWOOD_A; a++) { int j; @@ -41,7 +40,7 @@ mp_err mp_prime_frobenius_underwood(const mp_int *N, bool *result) mp_set_i32(&T1z, (int32_t)((a * a) - 4)); - if ((err = mp_kronecker(&T1z, N, &j)) != MP_OKAY) goto LBL_END; + if ((err = mp_kronecker(&T1z, N, &j)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); if (j == -1) { break; @@ -50,27 +49,27 @@ mp_err mp_prime_frobenius_underwood(const mp_int *N, bool *result) if (j == 0) { /* composite */ *result = false; - goto LBL_END; + goto LTM_ERR_END; } } /* Tell it a composite and set return value accordingly */ if (a >= LTM_FROBENIUS_UNDERWOOD_A) { err = MP_ITER; - goto LBL_END; + MP_TRACE_ERROR(err, LTM_ERR_END); } /* Composite if N and (a+4)*(2*a+5) are not coprime */ mp_set_u32(&T1z, (uint32_t)((a+4)*((2*a)+5))); - if ((err = mp_gcd(N, &T1z, &T1z)) != MP_OKAY) goto LBL_END; + if ((err = mp_gcd(N, &T1z, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); if (!((T1z.used == 1) && (T1z.dp[0] == 1u))) { /* composite */ *result = false; - goto LBL_END; + goto LTM_ERR_END; } ap2 = a + 2; - if ((err = mp_add_d(N, 1uL, &Np1z)) != MP_OKAY) goto LBL_END; + if ((err = mp_add_d(N, 1uL, &Np1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); mp_set(&sz, 1uL); mp_set(&tz, 2uL); @@ -81,20 +80,20 @@ mp_err mp_prime_frobenius_underwood(const mp_int *N, bool *result) * tz = ((tz-sz)*(tz+sz))%N; * sz = temp; */ - if ((err = mp_mul_2(&tz, &T2z)) != MP_OKAY) goto LBL_END; + if ((err = mp_mul_2(&tz, &T2z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* a = 0 at about 50% of the cases (non-square and odd input) */ if (a != 0) { - if ((err = mp_mul_d(&sz, (mp_digit)a, &T1z)) != MP_OKAY) goto LBL_END; - if ((err = mp_add(&T1z, &T2z, &T2z)) != MP_OKAY) goto LBL_END; + if ((err = mp_mul_d(&sz, (mp_digit)a, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_add(&T1z, &T2z, &T2z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); } - if ((err = 
mp_mul(&T2z, &sz, &T1z)) != MP_OKAY) goto LBL_END; - if ((err = mp_sub(&tz, &sz, &T2z)) != MP_OKAY) goto LBL_END; - if ((err = mp_add(&sz, &tz, &sz)) != MP_OKAY) goto LBL_END; - if ((err = mp_mul(&sz, &T2z, &tz)) != MP_OKAY) goto LBL_END; - if ((err = mp_mod(&tz, N, &tz)) != MP_OKAY) goto LBL_END; - if ((err = mp_mod(&T1z, N, &sz)) != MP_OKAY) goto LBL_END; + if ((err = mp_mul(&T2z, &sz, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_sub(&tz, &sz, &T2z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_add(&sz, &tz, &sz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_mul(&sz, &T2z, &tz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_mod(&tz, N, &tz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_mod(&T1z, N, &sz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); if (s_mp_get_bit(&Np1z, i)) { /* * temp = (a+2) * sz + tz @@ -102,24 +101,25 @@ mp_err mp_prime_frobenius_underwood(const mp_int *N, bool *result) * sz = temp */ if (a == 0) { - if ((err = mp_mul_2(&sz, &T1z)) != MP_OKAY) goto LBL_END; + if ((err = mp_mul_2(&sz, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); } else { - if ((err = mp_mul_d(&sz, (mp_digit)ap2, &T1z)) != MP_OKAY) goto LBL_END; + if ((err = mp_mul_d(&sz, (mp_digit)ap2, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); } - if ((err = mp_add(&T1z, &tz, &T1z)) != MP_OKAY) goto LBL_END; - if ((err = mp_mul_2(&tz, &T2z)) != MP_OKAY) goto LBL_END; - if ((err = mp_sub(&T2z, &sz, &tz)) != MP_OKAY) goto LBL_END; + if ((err = mp_add(&T1z, &tz, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_mul_2(&tz, &T2z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_sub(&T2z, &sz, &tz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); mp_exch(&sz, &T1z); } } mp_set_u32(&T1z, (uint32_t)((2 * a) + 5)); - if ((err = mp_mod(&T1z, N, &T1z)) != MP_OKAY) goto LBL_END; + if ((err = mp_mod(&T1z, N, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); *result = mp_iszero(&sz) && (mp_cmp(&tz, &T1z) == MP_EQ); -LBL_END: +LTM_ERR_END: mp_clear_multi(&tz, &sz, &Np1z, &T2z, &T1z, NULL); +LTM_ERR: return err; } diff --git a/mp_prime_is_prime.c b/mp_prime_is_prime.c index bb24f5944..d6a49211c 100644 --- a/mp_prime_is_prime.c +++ b/mp_prime_is_prime.c @@ -18,7 +18,7 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) mp_int b; int ix; bool res; - mp_err err; + mp_err err = MP_OKAY; /* default to no */ *result = false; @@ -41,9 +41,7 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) return MP_OKAY; } /* N is not a perfect square: floor(sqrt(N))^2 != N */ - if ((err = mp_is_square(a, &res)) != MP_OKAY) { - return err; - } + if ((err = mp_is_square(a, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if (res) { return MP_OKAY; } @@ -56,9 +54,7 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) } } /* first perform trial division */ - if ((err = s_mp_prime_is_divisible(a, &res)) != MP_OKAY) { - return err; - } + if ((err = s_mp_prime_is_divisible(a, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* return if it was trivially divisible */ if (res) { @@ -68,15 +64,11 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) /* Run the Miller-Rabin test with base 2 for the BPSW test. 
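      (BPSW = this base-2 Miller-Rabin round combined with the strong
      Lucas-Selfridge round run further below; the extra base-3 round in
      between is an additional precaution, not part of BPSW itself.)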
*/ - if ((err = mp_init_set(&b, 2uL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_set(&b, 2uL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); - if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); if (!res) { - goto LBL_B; + goto LTM_ERR_B; } /* Rumours have it that Mathematica does a second M-R test with base 3. @@ -84,11 +76,9 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) It does not hurt, though, beside a bit of extra runtime. */ b.dp[0]++; - if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); if (!res) { - goto LBL_B; + goto LTM_ERR_B; } /* @@ -100,18 +90,14 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) if (t >= 0) { #ifdef LTM_USE_FROBENIUS_TEST err = mp_prime_frobenius_underwood(a, &res); - if ((err != MP_OKAY) && (err != MP_ITER)) { - goto LBL_B; - } + if ((err != MP_OKAY) && (err != MP_ITER)) MP_TRACE_ERROR(err, LTM_ERR_B); if (!res) { - goto LBL_B; + goto LTM_ERR_B; } #else - if ((err = mp_prime_strong_lucas_selfridge(a, &res)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_prime_strong_lucas_selfridge(a, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); if (!res) { - goto LBL_B; + goto LTM_ERR_B; } #endif } @@ -138,34 +124,28 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) "Strong Pseudoprimes to Twelve Prime Bases". */ /* 0x437ae92817f9fc85b7e5 = 318665857834031151167461 */ - if ((err = mp_read_radix(&b, "437ae92817f9fc85b7e5", 16)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_read_radix(&b, "437ae92817f9fc85b7e5", 16)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); if (mp_cmp(a, &b) == MP_LT) { p_max = 12; } else { /* 0x2be6951adc5b22410a5fd = 3317044064679887385961981 */ - if ((err = mp_read_radix(&b, "2be6951adc5b22410a5fd", 16)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_read_radix(&b, "2be6951adc5b22410a5fd", 16)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); if (mp_cmp(a, &b) == MP_LT) { p_max = 13; } else { err = MP_VAL; - goto LBL_B; + MP_TRACE_ERROR(err, LTM_ERR_B); } } /* we did bases 2 and 3 already, skip them */ for (ix = 2; ix < p_max; ix++) { mp_set(&b, s_mp_prime_tab[ix]); - if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); if (!res) { - goto LBL_B; + goto LTM_ERR_B; } } } @@ -226,9 +206,7 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) int len; /* mp_rand() guarantees the first digit to be non-zero */ - if ((err = mp_rand(&b, 1)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_rand(&b, 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); /* * Reduce digit before casting because mp_digit might be bigger than * an unsigned int and "mask" on the other side is most probably not. 
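The call sites above and throughout this patch use MP_TRACE_ERROR(err, LABEL) as a drop-in for both the old "return err;" and "goto LABEL;" exits, so the macro (defined in tommath_private.h elsewhere in this patch, not shown in this excerpt) has to transfer control to the given label unconditionally; the diagnostic output is the optional part. A rough sketch of the shape implied by the call sites; the message format and any guarding build flag are guesses, not the actual definition:

   /* Sketch only: needs <stdio.h>; the real macro may differ in detail.
    * The goto is essential: every call site relies on control
    * continuing at `label` instead of falling through. */
   #define MP_TRACE_ERROR(err, label)                                 \
      do {                                                            \
         fprintf(stderr, "ltm: error %d at %s:%d\n",                  \
                 (int)(err), __FILE__, __LINE__);                     \
         goto label;                                                  \
      } while (0)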
@@ -244,9 +222,7 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) ix--; continue; } - if ((err = mp_rand(&b, len)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_rand(&b, len)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); /* * That number might got too big and the witness has to be * smaller than "a" @@ -254,28 +230,25 @@ mp_err mp_prime_is_prime(const mp_int *a, int t, bool *result) len = mp_count_bits(&b); if (len >= size_a) { len = (len - size_a) + 1; - if ((err = mp_div_2d(&b, len, &b, NULL)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_div_2d(&b, len, &b, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); } /* Although the chance for b <= 3 is miniscule, try again. */ if (mp_cmp_d(&b, 3uL) != MP_GT) { ix--; continue; } - if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) { - goto LBL_B; - } + if ((err = mp_prime_miller_rabin(a, &b, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_B); if (!res) { - goto LBL_B; + goto LTM_ERR_B; } } } /* passed the test */ *result = true; -LBL_B: +LTM_ERR_B: mp_clear(&b); +LTM_ERR: return err; } diff --git a/mp_prime_miller_rabin.c b/mp_prime_miller_rabin.c index 4c23a9f28..9a2030468 100644 --- a/mp_prime_miller_rabin.c +++ b/mp_prime_miller_rabin.c @@ -13,26 +13,21 @@ mp_err mp_prime_miller_rabin(const mp_int *a, const mp_int *b, bool *result) { mp_int n1, y, r; - mp_err err; + mp_err err = MP_OKAY; int s, j; /* ensure b > 1 */ if (mp_cmp_d(b, 1uL) != MP_GT) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* get n1 = a - 1 */ - if ((err = mp_init_copy(&n1, a)) != MP_OKAY) { - return err; - } - if ((err = mp_sub_d(&n1, 1uL, &n1)) != MP_OKAY) { - goto LBL_ERR1; - } + if ((err = mp_init_copy(&n1, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_sub_d(&n1, 1uL, &n1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* set 2**s * r = n1 */ - if ((err = mp_init_copy(&r, &n1)) != MP_OKAY) { - goto LBL_ERR1; - } + if ((err = mp_init_copy(&r, &n1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* count the number of least significant bits * which are zero @@ -40,31 +35,23 @@ mp_err mp_prime_miller_rabin(const mp_int *a, const mp_int *b, bool *result) s = mp_cnt_lsb(&r); /* now divide n - 1 by 2**s */ - if ((err = mp_div_2d(&r, s, &r, NULL)) != MP_OKAY) { - goto LBL_ERR2; - } + if ((err = mp_div_2d(&r, s, &r, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_2); /* compute y = b**r mod a */ - if ((err = mp_init(&y)) != MP_OKAY) { - goto LBL_ERR2; - } - if ((err = mp_exptmod(b, &r, a, &y)) != MP_OKAY) { - goto LBL_END; - } + if ((err = mp_init(&y)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_2); + if ((err = mp_exptmod(b, &r, a, &y)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* if y != 1 and y != n1 do */ if ((mp_cmp_d(&y, 1uL) != MP_EQ) && (mp_cmp(&y, &n1) != MP_EQ)) { j = 1; /* while j <= s-1 and y != n1 */ while ((j <= (s - 1)) && (mp_cmp(&y, &n1) != MP_EQ)) { - if ((err = mp_sqrmod(&y, a, &y)) != MP_OKAY) { - goto LBL_END; - } + if ((err = mp_sqrmod(&y, a, &y)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* if y == 1 then composite */ if (mp_cmp_d(&y, 1uL) == MP_EQ) { *result = false; - goto LBL_END; + goto LTM_ERR_END; } ++j; @@ -73,19 +60,20 @@ mp_err mp_prime_miller_rabin(const mp_int *a, const mp_int *b, bool *result) /* if y != n1 then composite */ if (mp_cmp(&y, &n1) != MP_EQ) { *result = false; - goto LBL_END; + goto LTM_ERR_END; } } /* probably prime now */ *result = true; -LBL_END: +LTM_ERR_END: mp_clear(&y); -LBL_ERR2: +LTM_ERR_2: mp_clear(&r); -LBL_ERR1: +LTM_ERR_1: mp_clear(&n1); +LTM_ERR: return 
err; } #endif diff --git a/mp_prime_next_prime.c b/mp_prime_next_prime.c index 6faa08de7..55801556a 100644 --- a/mp_prime_next_prime.c +++ b/mp_prime_next_prime.c @@ -11,7 +11,7 @@ mp_err mp_prime_next_prime(mp_int *a, int t, bool bbs_style) { int x; - mp_err err; + mp_err err = MP_OKAY; bool res = false; mp_digit res_tab[MP_PRIME_TAB_SIZE], kstep; mp_int b; @@ -48,30 +48,22 @@ mp_err mp_prime_next_prime(mp_int *a, int t, bool bbs_style) if (bbs_style) { /* if a mod 4 != 3 subtract the correct value to make it so */ if ((a->dp[0] & 3u) != 3u) { - if ((err = mp_sub_d(a, (a->dp[0] & 3u) + 1u, a)) != MP_OKAY) { - return err; - } + if ((err = mp_sub_d(a, (a->dp[0] & 3u) + 1u, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } } else { if (mp_iseven(a)) { /* force odd */ - if ((err = mp_sub_d(a, 1uL, a)) != MP_OKAY) { - return err; - } + if ((err = mp_sub_d(a, 1uL, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } } /* generate the restable */ for (x = 1; x < MP_PRIME_TAB_SIZE; x++) { - if ((err = mp_mod_d(a, s_mp_prime_tab[x], res_tab + x)) != MP_OKAY) { - return err; - } + if ((err = mp_mod_d(a, s_mp_prime_tab[x], res_tab + x)) != MP_OKAY)MP_TRACE_ERROR(err, LTM_ERR); } /* init temp used for Miller-Rabin Testing */ - if ((err = mp_init(&b)) != MP_OKAY) { - return err; - } + if ((err = mp_init(&b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); for (;;) { mp_digit step = 0; @@ -102,25 +94,22 @@ mp_err mp_prime_next_prime(mp_int *a, int t, bool bbs_style) } while (y && (step < (((mp_digit)1 << MP_DIGIT_BIT) - kstep))); /* add the step */ - if ((err = mp_add_d(a, step, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_add_d(a, step, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* if didn't pass sieve and step == MP_MAX then skip test */ if (y && (step >= (((mp_digit)1 << MP_DIGIT_BIT) - kstep))) { continue; } - if ((err = mp_prime_is_prime(a, t, &res)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_prime_is_prime(a, t, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); if (res) { break; } } -LBL_ERR: +LTM_ERR_1: mp_clear(&b); +LTM_ERR: return err; } diff --git a/mp_prime_rand.c b/mp_prime_rand.c index 5351aefe4..1b068e6b9 100644 --- a/mp_prime_rand.c +++ b/mp_prime_rand.c @@ -23,11 +23,12 @@ mp_err mp_prime_rand(mp_int *a, int t, int size, int flags) uint8_t *tmp, maskAND, maskOR_msb, maskOR_lsb; int bsize, maskOR_msb_offset; bool res; - mp_err err; + mp_err err = MP_OKAY; /* sanity check the input */ if (size <= 1) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* MP_PRIME_SAFE implies MP_PRIME_BBS */ @@ -41,7 +42,8 @@ mp_err mp_prime_rand(mp_int *a, int t, int size, int flags) /* we need a buffer of bsize bytes */ tmp = (uint8_t *) MP_MALLOC((size_t)bsize); if (tmp == NULL) { - return MP_MEM; + err = MP_MEM; + MP_TRACE_ERROR(err, LTM_ERR); } /* calc the maskAND value for the MSbyte*/ @@ -62,9 +64,7 @@ mp_err mp_prime_rand(mp_int *a, int t, int size, int flags) do { /* read the bytes */ - if ((err = s_mp_rand_source(tmp, (size_t)bsize)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_rand_source(tmp, (size_t)bsize)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* work over the MSbyte */ tmp[0] &= maskAND; @@ -76,47 +76,33 @@ mp_err mp_prime_rand(mp_int *a, int t, int size, int flags) /* read it in */ /* TODO: casting only for now until all lengths have been changed to the type "size_t"*/ - if ((err = mp_from_ubin(a, tmp, (size_t)bsize)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_from_ubin(a, tmp, (size_t)bsize)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* is 
it prime? */ - if ((err = mp_prime_is_prime(a, t, &res)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_prime_is_prime(a, t, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); if (!res) { continue; } if ((flags & MP_PRIME_SAFE) != 0) { /* see if (a-1)/2 is prime */ - if ((err = mp_sub_d(a, 1uL, a)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_div_2(a, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_sub_d(a, 1uL, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2(a, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* is it prime? */ - if ((err = mp_prime_is_prime(a, t, &res)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_prime_is_prime(a, t, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } } while (!res); if ((flags & MP_PRIME_SAFE) != 0) { /* restore a to the original value */ - if ((err = mp_mul_2(a, a)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_add_d(a, 1uL, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul_2(a, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add_d(a, 1uL, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } - err = MP_OKAY; -LBL_ERR: +LTM_ERR_1: MP_FREE_BUF(tmp, (size_t)bsize); +LTM_ERR: return err; } diff --git a/mp_prime_strong_lucas_selfridge.c b/mp_prime_strong_lucas_selfridge.c index 23486e3a6..c13d0f575 100644 --- a/mp_prime_strong_lucas_selfridge.c +++ b/mp_prime_strong_lucas_selfridge.c @@ -16,19 +16,20 @@ static mp_err s_mul_si(const mp_int *a, int32_t d, mp_int *c) { mp_int t; - mp_err err; + mp_err err = MP_OKAY; - if ((err = mp_init(&t)) != MP_OKAY) { - return err; - } + if ((err = mp_init(&t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* * mp_digit might be smaller than a long, which excludes * the use of mp_mul_d() here. */ mp_set_i32(&t, d); - err = mp_mul(a, &t, c); + if ((err = mp_mul(a, &t, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + +LTM_ERR_END: mp_clear(&t); +LTM_ERR: return err; } /* @@ -54,7 +55,7 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) mp_int Dz, gcd, Np1, Uz, Vz, U2mz, V2mz, Qmz, Q2mz, Qkdz, T1z, T2z, T3z, T4z, Q2kdz; int J; int32_t D, Ds, sign, P, Q, r, s, u, Nbits; - mp_err err; + mp_err err = MP_OKAY; bool oddness; *result = false; @@ -67,9 +68,7 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) */ if ((err = mp_init_multi(&Dz, &gcd, &Np1, &Uz, &Vz, &U2mz, &V2mz, &Qmz, &Q2mz, &Qkdz, &T1z, &T2z, &T3z, &T4z, &Q2kdz, - NULL)) != MP_OKAY) { - return err; - } + NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); D = 5; sign = 1; @@ -78,18 +77,16 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) Ds = sign * D; sign = -sign; mp_set_u32(&Dz, (uint32_t)D); - if ((err = mp_gcd(a, &Dz, &gcd)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_gcd(a, &Dz, &gcd)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); /* if 1 < GCD < N then N is composite with factor "D", and Jacobi(D,N) is technically undefined (but often returned as zero). 
*/ - if ((mp_cmp_d(&gcd, 1uL) == MP_GT) && (mp_cmp(&gcd, a) == MP_LT)) { - goto LBL_LS_ERR; - } + if ((mp_cmp_d(&gcd, 1uL) == MP_GT) && (mp_cmp(&gcd, a) == MP_LT)) MP_TRACE_ERROR(err, LTM_ERR_LS); if (Ds < 0) { Dz.sign = MP_NEG; } - if ((err = mp_kronecker(&Dz, a, &J)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_kronecker(&Dz, a, &J)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); if (J == -1) { break; @@ -98,7 +95,7 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) if (D > (INT_MAX - 2)) { err = MP_VAL; - goto LBL_LS_ERR; + MP_TRACE_ERROR(err, LTM_ERR_LS); } } @@ -137,7 +134,7 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) Baillie-PSW test based on the strong Lucas-Selfridge test should be more reliable. */ - if ((err = mp_add_d(a, 1uL, &Np1)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_add_d(a, 1uL, &Np1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); s = mp_cnt_lsb(&Np1); /* CZ @@ -147,7 +144,7 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) * dividing an even number by two does not produce * any leftovers. */ - if ((err = mp_div_2d(&Np1, s, &Dz, NULL)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_div_2d(&Np1, s, &Dz, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); /* We must now compute U_d and V_d. Since d is odd, the accumulated values U and V are initialized to U_1 and V_1 (if the target index were even, U and V would be initialized instead to U_0=0 @@ -165,7 +162,7 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) mp_set(&V2mz, (mp_digit)P); /* V_1 */ mp_set_i32(&Qmz, Q); - if ((err = mp_mul_2(&Qmz, &Q2mz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_mul_2(&Qmz, &Q2mz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); /* Initializes calculation of Q^d */ mp_set_i32(&Qkdz, Q); @@ -180,18 +177,18 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) * V_2m = V_m*V_m - 2*Q^m */ - if ((err = mp_mul(&U2mz, &V2mz, &U2mz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mod(&U2mz, a, &U2mz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_sqr(&V2mz, &V2mz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_sub(&V2mz, &Q2mz, &V2mz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mod(&V2mz, a, &V2mz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_mul(&U2mz, &V2mz, &U2mz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mod(&U2mz, a, &U2mz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_sqr(&V2mz, &V2mz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_sub(&V2mz, &Q2mz, &V2mz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mod(&V2mz, a, &V2mz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); /* Must calculate powers of Q for use in V_2m, also for Q^d later */ - if ((err = mp_sqr(&Qmz, &Qmz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_sqr(&Qmz, &Qmz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); /* prevents overflow */ /* CZ still necessary without a fixed prealloc'd mem.? */ - if ((err = mp_mod(&Qmz, a, &Qmz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mul_2(&Qmz, &Q2mz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_mod(&Qmz, a, &Qmz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mul_2(&Qmz, &Q2mz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); if (s_mp_get_bit(&Dz, u)) { /* Formulas for addition of indices (carried out mod N); @@ -201,14 +198,14 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) * * Be careful with division by 2 (mod N)! 
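          * (Division by 2 mod N is done below by adding N first whenever the
          *  sum is odd: N is odd here, so the sum becomes even and the halving
          *  is exact.  E.g. N = 7: 5/2 mod 7 = (5+7)/2 = 6, and 2*6 = 12 = 5 mod 7.)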
*/ - if ((err = mp_mul(&U2mz, &Vz, &T1z)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mul(&Uz, &V2mz, &T2z)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mul(&V2mz, &Vz, &T3z)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mul(&U2mz, &Uz, &T4z)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = s_mul_si(&T4z, Ds, &T4z)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_add(&T1z, &T2z, &Uz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_mul(&U2mz, &Vz, &T1z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mul(&Uz, &V2mz, &T2z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mul(&V2mz, &Vz, &T3z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mul(&U2mz, &Uz, &T4z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = s_mul_si(&T4z, Ds, &T4z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_add(&T1z, &T2z, &Uz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); if (mp_isodd(&Uz)) { - if ((err = mp_add(&Uz, a, &Uz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_add(&Uz, a, &Uz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); } /* CZ * This should round towards negative infinity because @@ -216,25 +213,25 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) * But mp_div_2() does not do so, it is truncating instead. */ oddness = mp_isodd(&Uz); - if ((err = mp_div_2(&Uz, &Uz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_div_2(&Uz, &Uz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); if (mp_isneg(&Uz) && oddness) { - if ((err = mp_sub_d(&Uz, 1uL, &Uz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_sub_d(&Uz, 1uL, &Uz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); } - if ((err = mp_add(&T3z, &T4z, &Vz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_add(&T3z, &T4z, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); if (mp_isodd(&Vz)) { - if ((err = mp_add(&Vz, a, &Vz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_add(&Vz, a, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); } oddness = mp_isodd(&Vz); - if ((err = mp_div_2(&Vz, &Vz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_div_2(&Vz, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); if (mp_isneg(&Vz) && oddness) { - if ((err = mp_sub_d(&Vz, 1uL, &Vz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_sub_d(&Vz, 1uL, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); } - if ((err = mp_mod(&Uz, a, &Uz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mod(&Vz, a, &Vz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_mod(&Uz, a, &Uz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mod(&Vz, a, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); /* Calculating Q^d for later use */ - if ((err = mp_mul(&Qkdz, &Qmz, &Qkdz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mod(&Qkdz, a, &Qkdz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_mul(&Qkdz, &Qmz, &Qkdz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mod(&Qkdz, a, &Qkdz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); } } @@ -242,7 +239,7 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) strong Lucas pseudoprime. */ if (mp_iszero(&Uz) || mp_iszero(&Vz)) { *result = true; - goto LBL_LS_ERR; + goto LTM_ERR_LS; } /* NOTE: Ribenboim ("The new book of prime number records," 3rd ed., @@ -257,25 +254,26 @@ mp_err mp_prime_strong_lucas_selfridge(const mp_int *a, bool *result) Lucas pseudoprime. 
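      (Each pass of the loop below applies the doubling formula
      V_2m = V_m*V_m - 2*Q^m, so twice the current power of Q is kept
      up to date in Q2kdz alongside Qkdz = Q^(d*2^(r-1)).)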
*/ /* Initialize 2*Q^(d*2^r) for V_2m */ - if ((err = mp_mul_2(&Qkdz, &Q2kdz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_mul_2(&Qkdz, &Q2kdz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); for (r = 1; r < s; r++) { - if ((err = mp_sqr(&Vz, &Vz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_sub(&Vz, &Q2kdz, &Vz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mod(&Vz, a, &Vz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_sqr(&Vz, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_sub(&Vz, &Q2kdz, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mod(&Vz, a, &Vz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); if (mp_iszero(&Vz)) { *result = true; - goto LBL_LS_ERR; + goto LTM_ERR_LS; } /* Calculate Q^{d*2^r} for next r (final iteration irrelevant). */ if (r < (s - 1)) { - if ((err = mp_sqr(&Qkdz, &Qkdz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mod(&Qkdz, a, &Qkdz)) != MP_OKAY) goto LBL_LS_ERR; - if ((err = mp_mul_2(&Qkdz, &Q2kdz)) != MP_OKAY) goto LBL_LS_ERR; + if ((err = mp_sqr(&Qkdz, &Qkdz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mod(&Qkdz, a, &Qkdz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); + if ((err = mp_mul_2(&Qkdz, &Q2kdz)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_LS); } } -LBL_LS_ERR: +LTM_ERR_LS: mp_clear_multi(&Q2kdz, &T4z, &T3z, &T2z, &T1z, &Qkdz, &Q2mz, &Qmz, &V2mz, &U2mz, &Vz, &Uz, &Np1, &gcd, &Dz, NULL); +LTM_ERR: return err; } #endif diff --git a/mp_radix_size.c b/mp_radix_size.c index ca08438bc..b9b73801f 100644 --- a/mp_radix_size.c +++ b/mp_radix_size.c @@ -6,13 +6,14 @@ /* returns size of ASCII representation */ mp_err mp_radix_size(const mp_int *a, int radix, size_t *size) { - mp_err err; + mp_err err = MP_OKAY; mp_int a_; int b; /* make sure the radix is in range */ if ((radix < 2) || (radix > 64)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } if (mp_iszero(a)) { @@ -22,13 +23,12 @@ mp_err mp_radix_size(const mp_int *a, int radix, size_t *size) a_ = *a; a_.sign = MP_ZPOS; - if ((err = mp_log_n(&a_, radix, &b)) != MP_OKAY) { - return err; - } + if ((err = mp_log_n(&a_, radix, &b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* mp_ilogb truncates to zero, hence we need one extra put on top and one for `\0`. */ *size = (size_t)b + 2U + (mp_isneg(a) ? 
1U : 0U); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_radix_size_overestimate.c b/mp_radix_size_overestimate.c index 3fe81d79d..eabbdc2e4 100644 --- a/mp_radix_size_overestimate.c +++ b/mp_radix_size_overestimate.c @@ -5,13 +5,18 @@ mp_err mp_radix_size_overestimate(const mp_int *a, const int radix, size_t *size) { + mp_err err = MP_OKAY; if (MP_HAS(S_MP_RADIX_SIZE_OVERESTIMATE)) { - return s_mp_radix_size_overestimate(a, radix, size); + if ((err = s_mp_radix_size_overestimate(a, radix, size)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } if (MP_HAS(MP_RADIX_SIZE)) { - return mp_radix_size(a, radix, size); + if ((err = mp_radix_size(a, radix, size)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } - return MP_ERR; + err = MP_ERR; +LTM_ERR: + return err; } #endif diff --git a/mp_rand.c b/mp_rand.c index 19364755a..bf427941e 100644 --- a/mp_rand.c +++ b/mp_rand.c @@ -6,7 +6,7 @@ mp_err mp_rand(mp_int *a, int digits) { int i; - mp_err err; + mp_err err = MP_OKAY; mp_zero(a); @@ -14,19 +14,15 @@ mp_err mp_rand(mp_int *a, int digits) return MP_OKAY; } - if ((err = mp_grow(a, digits)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(a, digits)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); - if ((err = s_mp_rand_source(a->dp, (size_t)digits * sizeof(mp_digit))) != MP_OKAY) { - return err; - } + if ((err = s_mp_rand_source(a->dp, (size_t)digits * sizeof(mp_digit))) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); /* TODO: We ensure that the highest digit is nonzero. Should this be removed? */ while ((a->dp[digits - 1] & MP_MASK) == 0u) { - if ((err = s_mp_rand_source(a->dp + digits - 1, sizeof(mp_digit))) != MP_OKAY) { - return err; - } + if ((err = s_mp_rand_source(a->dp + digits - 1, sizeof(mp_digit))) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); } a->used = digits; @@ -34,6 +30,7 @@ mp_err mp_rand(mp_int *a, int digits) a->dp[i] &= MP_MASK; } - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_read_radix.c b/mp_read_radix.c index 28e6eb60a..fff0ad56b 100644 --- a/mp_read_radix.c +++ b/mp_read_radix.c @@ -6,12 +6,13 @@ /* read a string [ASCII] in a given radix */ mp_err mp_read_radix(mp_int *a, const char *str, int radix) { - mp_err err; - mp_sign sign = MP_ZPOS; + mp_err err = MP_OKAY; + mp_sign sign = MP_ZPOS; /* make sure the radix is ok */ if ((radix < 2) || (radix > 64)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* if the leading digit is a @@ -46,24 +47,23 @@ mp_err mp_read_radix(mp_int *a, const char *str, int radix) if (y >= radix) { break; } - if ((err = mp_mul_d(a, (mp_digit)radix, a)) != MP_OKAY) { - return err; - } - if ((err = mp_add_d(a, y, a)) != MP_OKAY) { - return err; - } + if ((err = mp_mul_d(a, (mp_digit)radix, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_add_d(a, y, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); ++str; } /* if an illegal character was found, fail. 
*/ if ((*str != '\0') && (*str != '\r') && (*str != '\n')) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* set the sign only if a != 0 */ if (!mp_iszero(a)) { a->sign = sign; } - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_reduce.c b/mp_reduce.c index b6fae55cc..cfd672787 100644 --- a/mp_reduce.c +++ b/mp_reduce.c @@ -10,74 +10,54 @@ mp_err mp_reduce(mp_int *x, const mp_int *m, const mp_int *mu) { mp_int q; - mp_err err; + mp_err err = MP_OKAY; int um = m->used; /* q = x */ - if ((err = mp_init_copy(&q, x)) != MP_OKAY) { - return err; - } + if ((err = mp_init_copy(&q, x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* q1 = x / b**(k-1) */ mp_rshd(&q, um - 1); /* according to HAC this optimization is ok */ if ((mp_digit)um > ((mp_digit)1 << (MP_DIGIT_BIT - 1))) { - if ((err = mp_mul(&q, mu, &q)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul(&q, mu, &q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else if (MP_HAS(S_MP_MUL_HIGH)) { - if ((err = s_mp_mul_high(&q, mu, &q, um)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_mul_high(&q, mu, &q, um)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else if (MP_HAS(S_MP_MUL_HIGH_COMBA)) { - if ((err = s_mp_mul_high_comba(&q, mu, &q, um)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_mul_high_comba(&q, mu, &q, um)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else { err = MP_VAL; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } /* q3 = q2 / b**(k+1) */ mp_rshd(&q, um + 1); /* x = x mod b**(k+1), quick (no division) */ - if ((err = mp_mod_2d(x, MP_DIGIT_BIT * (um + 1), x)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mod_2d(x, MP_DIGIT_BIT * (um + 1), x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* q = q * m mod b**(k+1), quick (no division) */ - if ((err = s_mp_mul(&q, m, &q, um + 1)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_mul(&q, m, &q, um + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* x = x - q */ - if ((err = mp_sub(x, &q, x)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_sub(x, &q, x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* If x < 0, add b**(k+1) to it */ if (mp_cmp_d(x, 0uL) == MP_LT) { mp_set(&q, 1uL); - if ((err = mp_lshd(&q, um + 1)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_add(x, &q, x)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_lshd(&q, um + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(x, &q, x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* Back off if it's too big */ while (mp_cmp(x, m) != MP_LT) { - if ((err = s_mp_sub(x, m, x)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_sub(x, m, x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } -LBL_ERR: +LTM_ERR_1: mp_clear(&q); - +LTM_ERR: return err; } #endif diff --git a/mp_reduce_2k.c b/mp_reduce_2k.c index e635f5b90..f91c6eb71 100644 --- a/mp_reduce_2k.c +++ b/mp_reduce_2k.c @@ -7,42 +7,33 @@ mp_err mp_reduce_2k(mp_int *a, const mp_int *n, mp_digit d) { mp_int q; - mp_err err; + mp_err err = MP_OKAY; int p; - if ((err = mp_init(&q)) != MP_OKAY) { - return err; - } + if ((err = mp_init(&q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); p = mp_count_bits(n); for (;;) { /* q = a/2**p, a = a mod 2**p */ - if ((err = mp_div_2d(a, p, &q, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_div_2d(a, p, &q, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); if (d != 1u) { /* q = q * d */ - if ((err = mp_mul_d(&q, d, &q)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul_d(&q, d, &q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* a = a + q */ - 
if ((err = s_mp_add(a, &q, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_add(a, &q, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); if (mp_cmp_mag(a, n) == MP_LT) { break; } - if ((err = s_mp_sub(a, n, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_sub(a, n, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } -LBL_ERR: +LTM_ERR_1: mp_clear(&q); +LTM_ERR: return err; } diff --git a/mp_reduce_2k_l.c b/mp_reduce_2k_l.c index 31d9a1882..9e902f3b9 100644 --- a/mp_reduce_2k_l.c +++ b/mp_reduce_2k_l.c @@ -10,42 +10,32 @@ mp_err mp_reduce_2k_l(mp_int *a, const mp_int *n, const mp_int *d) { mp_int q; - mp_err err; + mp_err err = MP_OKAY; int p; - if ((err = mp_init(&q)) != MP_OKAY) { - return err; - } + if ((err = mp_init(&q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); p = mp_count_bits(n); for (;;) { /* q = a/2**p, a = a mod 2**p */ - if ((err = mp_div_2d(a, p, &q, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_div_2d(a, p, &q, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* q = q * d */ - if ((err = mp_mul(&q, d, &q)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul(&q, d, &q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* a = a + q */ - if ((err = s_mp_add(a, &q, a)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_add(a, &q, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); if (mp_cmp_mag(a, n) == MP_LT) { break; } - if ((err = s_mp_sub(a, n, a)) != MP_OKAY) { - goto LBL_ERR; - } - + if ((err = s_mp_sub(a, n, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } -LBL_ERR: +LTM_ERR_1: mp_clear(&q); +LTM_ERR: return err; } diff --git a/mp_reduce_2k_setup.c b/mp_reduce_2k_setup.c index 51f884134..6630add54 100644 --- a/mp_reduce_2k_setup.c +++ b/mp_reduce_2k_setup.c @@ -6,25 +6,20 @@ /* determines the setup value */ mp_err mp_reduce_2k_setup(const mp_int *a, mp_digit *d) { - mp_err err; + mp_err err = MP_OKAY; mp_int tmp; - if ((err = mp_init(&tmp)) != MP_OKAY) { - return err; - } + if ((err = mp_init(&tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); - if ((err = mp_2expt(&tmp, mp_count_bits(a))) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_2expt(&tmp, mp_count_bits(a))) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = s_mp_sub(&tmp, a, &tmp)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_sub(&tmp, a, &tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); *d = tmp.dp[0]; -LBL_ERR: +LTM_ERR_1: mp_clear(&tmp); +LTM_ERR: return err; } #endif diff --git a/mp_reduce_2k_setup_l.c b/mp_reduce_2k_setup_l.c index b647c9d88..2b66f982d 100644 --- a/mp_reduce_2k_setup_l.c +++ b/mp_reduce_2k_setup_l.c @@ -6,23 +6,18 @@ /* determines the setup value */ mp_err mp_reduce_2k_setup_l(const mp_int *a, mp_int *d) { - mp_err err; + mp_err err = MP_OKAY; mp_int tmp; - if ((err = mp_init(&tmp)) != MP_OKAY) { - return err; - } + if ((err = mp_init(&tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); - if ((err = mp_2expt(&tmp, mp_count_bits(a))) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_2expt(&tmp, mp_count_bits(a))) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = s_mp_sub(&tmp, a, d)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = s_mp_sub(&tmp, a, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); -LBL_ERR: +LTM_ERR_1: mp_clear(&tmp); +LTM_ERR: return err; } #endif diff --git a/mp_reduce_setup.c b/mp_reduce_setup.c index 2ce5b96f0..f10f6a5b6 100644 --- a/mp_reduce_setup.c +++ b/mp_reduce_setup.c @@ -8,10 +8,10 @@ */ mp_err mp_reduce_setup(mp_int *a, const mp_int *b) { - mp_err err; - if ((err = mp_2expt(a, b->used * 2 * MP_DIGIT_BIT)) != MP_OKAY) { - return err; - } 
- return mp_div(a, b, a, NULL); + mp_err err = MP_OKAY; + if ((err = mp_2expt(a, b->used * 2 * MP_DIGIT_BIT)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_div(a, b, a, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); +LTM_ERR: + return err; } #endif diff --git a/mp_root_n.c b/mp_root_n.c index d904df883..8cbda2738 100644 --- a/mp_root_n.c +++ b/mp_root_n.c @@ -16,20 +16,20 @@ mp_err mp_root_n(const mp_int *a, int b, mp_int *c) { mp_int t1, t2, t3, a_; int ilog2; - mp_err err; + mp_err err = MP_OKAY; if (b < 0 || (unsigned)b > (unsigned)MP_DIGIT_MAX) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* input must be positive if b is even */ if (((b & 1) == 0) && mp_isneg(a)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } - if ((err = mp_init_multi(&t1, &t2, &t3, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&t1, &t2, &t3, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* if a is negative fudge the sign but keep track */ a_ = *a; @@ -47,7 +47,7 @@ mp_err mp_root_n(const mp_int *a, int b, mp_int *c) mp_set(c, 1uL); c->sign = a->sign; err = MP_OKAY; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } /* "b" is smaller than INT_MAX, we can cast safely */ @@ -55,42 +55,41 @@ mp_err mp_root_n(const mp_int *a, int b, mp_int *c) mp_set(c, 1uL); c->sign = a->sign; err = MP_OKAY; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } ilog2 = ilog2 / b; if (ilog2 == 0) { mp_set(c, 1uL); c->sign = a->sign; err = MP_OKAY; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } /* Start value must be larger than root */ ilog2 += 2; - if ((err = mp_2expt(&t2,ilog2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_2expt(&t2,ilog2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); do { /* t1 = t2 */ - if ((err = mp_copy(&t2, &t1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(&t2, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* t2 = t1 - ((t1**b - a) / (b * t1**(b-1))) */ /* t3 = t1**(b-1) */ - if ((err = mp_expt_n(&t1, b - 1, &t3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_expt_n(&t1, b - 1, &t3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* numerator */ /* t2 = t1**b */ - if ((err = mp_mul(&t3, &t1, &t2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&t3, &t1, &t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* t2 = t1**b - a */ - if ((err = mp_sub(&t2, &a_, &t2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&t2, &a_, &t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* denominator */ /* t3 = t1**(b-1) * b */ - if ((err = mp_mul_d(&t3, (mp_digit)b, &t3)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_d(&t3, (mp_digit)b, &t3)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* t3 = (t1**b - a)/(b * t1**(b-1)) */ - if ((err = mp_div(&t2, &t3, &t3, NULL)) != MP_OKAY) goto LBL_ERR; - - if ((err = mp_sub(&t1, &t3, &t2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div(&t2, &t3, &t3, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_sub(&t1, &t3, &t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* Number of rounds is at most log_2(root). 
If it is more it @@ -105,23 +104,23 @@ mp_err mp_root_n(const mp_int *a, int b, mp_int *c) /* Loop beneath can overshoot by one if found root is smaller than actual root */ for (;;) { mp_ord cmp; - if ((err = mp_expt_n(&t1, b, &t2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_expt_n(&t1, b, &t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); cmp = mp_cmp(&t2, &a_); if (cmp == MP_EQ) { err = MP_OKAY; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } if (cmp == MP_LT) { - if ((err = mp_add_d(&t1, 1uL, &t1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add_d(&t1, 1uL, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else { break; } } /* correct overshoot from above or from recurrence */ for (;;) { - if ((err = mp_expt_n(&t1, b, &t2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_expt_n(&t1, b, &t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); if (mp_cmp(&t2, &a_) == MP_GT) { - if ((err = mp_sub_d(&t1, 1uL, &t1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub_d(&t1, 1uL, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else { break; } @@ -133,8 +132,9 @@ mp_err mp_root_n(const mp_int *a, int b, mp_int *c) /* set the sign of the result */ c->sign = a->sign; -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&t1, &t2, &t3, NULL); +LTM_ERR: return err; } diff --git a/mp_shrink.c b/mp_shrink.c index 3d9b1626d..c1da62abf 100644 --- a/mp_shrink.c +++ b/mp_shrink.c @@ -6,17 +6,21 @@ /* shrink a bignum */ mp_err mp_shrink(mp_int *a) { + mp_err err = MP_OKAY; int alloc = MP_MAX(MP_MIN_DIGIT_COUNT, a->used); if (a->alloc != alloc) { mp_digit *dp = (mp_digit *) MP_REALLOC(a->dp, (size_t)a->alloc * sizeof(mp_digit), (size_t)alloc * sizeof(mp_digit)); if (dp == NULL) { - return MP_MEM; + err = MP_MEM; + MP_TRACE_ERROR(err, LTM_ERR); } a->dp = dp; a->alloc = alloc; } - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_signed_rsh.c b/mp_signed_rsh.c index 3b7e232e5..441a5e2fa 100644 --- a/mp_signed_rsh.c +++ b/mp_signed_rsh.c @@ -6,16 +6,18 @@ /* shift right by a certain bit count with sign extension */ mp_err mp_signed_rsh(const mp_int *a, int b, mp_int *c) { - mp_err err; + mp_err err = MP_OKAY; if (!mp_isneg(a)) { - return mp_div_2d(a, b, c, NULL); - } - - if ((err = mp_add_d(a, 1uL, c)) != MP_OKAY) { + if ((err = mp_div_2d(a, b, c, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); return err; } - err = mp_div_2d(c, b, c, NULL); - return (err == MP_OKAY) ? 
mp_sub_d(c, 1uL, c) : err; + if ((err = mp_add_d(a, 1uL, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + + if ((err = mp_div_2d(c, b, c, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_sub_d(c, 1uL, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + +LTM_ERR: + return err; } #endif diff --git a/mp_sqrmod.c b/mp_sqrmod.c index bce2af090..5eb86836a 100644 --- a/mp_sqrmod.c +++ b/mp_sqrmod.c @@ -6,10 +6,12 @@ /* c = a * a (mod b) */ mp_err mp_sqrmod(const mp_int *a, const mp_int *b, mp_int *c) { - mp_err err; - if ((err = mp_sqr(a, c)) != MP_OKAY) { - return err; - } - return mp_mod(c, b, c); + mp_err err = MP_OKAY; + + if ((err = mp_sqr(a, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_mod(c, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + +LTM_ERR: + return err; } #endif diff --git a/mp_sqrt.c b/mp_sqrt.c index 1a9dca7da..4789670ff 100644 --- a/mp_sqrt.c +++ b/mp_sqrt.c @@ -6,12 +6,13 @@ /* this function is less generic than mp_n_root, simpler and faster */ mp_err mp_sqrt(const mp_int *arg, mp_int *ret) { - mp_err err; + mp_err err = MP_OKAY; mp_int t1, t2; /* must be positive */ if (mp_isneg(arg)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* easy out */ @@ -20,47 +21,32 @@ mp_err mp_sqrt(const mp_int *arg, mp_int *ret) return MP_OKAY; } - if ((err = mp_init_copy(&t1, arg)) != MP_OKAY) { - return err; - } + if ((err = mp_init_copy(&t1, arg)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); - if ((err = mp_init(&t2)) != MP_OKAY) { - goto LBL_ERR2; - } + if ((err = mp_init(&t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_2); /* First approx. (not very bad for large arg) */ mp_rshd(&t1, t1.used/2); /* t1 > 0 */ - if ((err = mp_div(arg, &t1, &t2, NULL)) != MP_OKAY) { - goto LBL_ERR1; - } - if ((err = mp_add(&t1, &t2, &t1)) != MP_OKAY) { - goto LBL_ERR1; - } - if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) { - goto LBL_ERR1; - } + if ((err = mp_div(arg, &t1, &t2, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&t1, &t2, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* And now t1 > sqrt(arg) */ do { - if ((err = mp_div(arg, &t1, &t2, NULL)) != MP_OKAY) { - goto LBL_ERR1; - } - if ((err = mp_add(&t1, &t2, &t1)) != MP_OKAY) { - goto LBL_ERR1; - } - if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) { - goto LBL_ERR1; - } + if ((err = mp_div(arg, &t1, &t2, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&t1, &t2, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* t1 >= sqrt(arg) >= t2 at this point */ } while (mp_cmp_mag(&t1, &t2) == MP_GT); mp_exch(&t1, ret); -LBL_ERR1: +LTM_ERR_1: mp_clear(&t2); -LBL_ERR2: +LTM_ERR_2: mp_clear(&t1); +LTM_ERR: return err; } diff --git a/mp_sqrtmod_prime.c b/mp_sqrtmod_prime.c index 0fae1d02a..15fae01e5 100644 --- a/mp_sqrtmod_prime.c +++ b/mp_sqrtmod_prime.c @@ -11,7 +11,7 @@ mp_err mp_sqrtmod_prime(const mp_int *n, const mp_int *prime, mp_int *ret) { - mp_err err; + mp_err err = MP_OKAY; int legendre; /* The type is "int" because of the types in the mp_int struct. Don't forget to change them here when you change them there! 
*/ @@ -24,39 +24,43 @@ mp_err mp_sqrtmod_prime(const mp_int *n, const mp_int *prime, mp_int *ret) return MP_OKAY; } /* "prime" must be odd and > 2 */ - if (mp_iseven(prime) || (mp_cmp_d(prime, 3uL) == MP_LT)) return MP_VAL; - if ((err = mp_kronecker(n, prime, &legendre)) != MP_OKAY) return err; + if (mp_iseven(prime) || (mp_cmp_d(prime, 3uL) == MP_LT)) { + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); + } + if ((err = mp_kronecker(n, prime, &legendre)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* n \not\cong 0 (mod p) and n \cong r^2 (mod p) for some r \in N^+ */ - if (legendre != 1) return MP_VAL; - - if ((err = mp_init_multi(&t1, &C, &Q, &Z, &T, &R, &two, NULL)) != MP_OKAY) { - return err; + if (legendre != 1) { + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } + if ((err = mp_init_multi(&t1, &C, &Q, &Z, &T, &R, &two, NULL)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); + /* SPECIAL CASE: if prime mod 4 == 3 * compute directly: err = n^(prime+1)/4 mod prime * Handbook of Applied Cryptography algorithm 3.36 */ /* x%4 == x&3 for x in N and x>0 */ if ((prime->dp[0] & 3u) == 3u) { - if ((err = mp_add_d(prime, 1uL, &t1)) != MP_OKAY) goto LBL_END; - if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) goto LBL_END; - if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) goto LBL_END; - if ((err = mp_exptmod(n, &t1, prime, ret)) != MP_OKAY) goto LBL_END; - err = MP_OKAY; - goto LBL_END; + if ((err = mp_add_d(prime, 1uL, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_exptmod(n, &t1, prime, ret)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + goto LTM_ERR_END; } /* NOW: Tonelli-Shanks algorithm */ /* factor out powers of 2 from prime-1, defining Q and S as: prime-1 = Q*2^S */ - if ((err = mp_copy(prime, &Q)) != MP_OKAY) goto LBL_END; - if ((err = mp_sub_d(&Q, 1uL, &Q)) != MP_OKAY) goto LBL_END; + if ((err = mp_copy(prime, &Q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_sub_d(&Q, 1uL, &Q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* Q = prime - 1 */ S = 0; /* S = 0 */ while (mp_iseven(&Q)) { - if ((err = mp_div_2(&Q, &Q)) != MP_OKAY) goto LBL_END; + if ((err = mp_div_2(&Q, &Q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* Q = Q / 2 */ S++; /* S = S + 1 */ @@ -66,33 +70,33 @@ mp_err mp_sqrtmod_prime(const mp_int *n, const mp_int *prime, mp_int *ret) mp_set(&Z, 2uL); /* Z = 2 */ for (;;) { - if ((err = mp_kronecker(&Z, prime, &legendre)) != MP_OKAY) goto LBL_END; + if ((err = mp_kronecker(&Z, prime, &legendre)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* If "prime" (p) is an odd prime Jacobi(k|p) = 0 for k \cong 0 (mod p) */ /* but there is at least one non-quadratic residue before k>=p if p is an odd prime. 
*/ if (legendre == 0) { err = MP_VAL; - goto LBL_END; + goto LTM_ERR_END; } if (legendre == -1) break; - if ((err = mp_add_d(&Z, 1uL, &Z)) != MP_OKAY) goto LBL_END; + if ((err = mp_add_d(&Z, 1uL, &Z)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* Z = Z + 1 */ } - if ((err = mp_exptmod(&Z, &Q, prime, &C)) != MP_OKAY) goto LBL_END; + if ((err = mp_exptmod(&Z, &Q, prime, &C)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* C = Z ^ Q mod prime */ - if ((err = mp_add_d(&Q, 1uL, &t1)) != MP_OKAY) goto LBL_END; - if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) goto LBL_END; + if ((err = mp_add_d(&Q, 1uL, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + if ((err = mp_div_2(&t1, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* t1 = (Q + 1) / 2 */ - if ((err = mp_exptmod(n, &t1, prime, &R)) != MP_OKAY) goto LBL_END; + if ((err = mp_exptmod(n, &t1, prime, &R)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* R = n ^ ((Q + 1) / 2) mod prime */ - if ((err = mp_exptmod(n, &Q, prime, &T)) != MP_OKAY) goto LBL_END; + if ((err = mp_exptmod(n, &Q, prime, &T)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* T = n ^ Q mod prime */ M = S; /* M = S */ mp_set(&two, 2uL); for (;;) { - if ((err = mp_copy(&T, &t1)) != MP_OKAY) goto LBL_END; + if ((err = mp_copy(&T, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); i = 0; for (;;) { if (mp_cmp_d(&t1, 1uL) == MP_EQ) break; @@ -100,33 +104,33 @@ mp_err mp_sqrtmod_prime(const mp_int *n, const mp_int *prime, mp_int *ret) (M is at least 1 in the first round because "prime" > 2) */ if (M == i) { err = MP_VAL; - goto LBL_END; + MP_TRACE_ERROR(err, LTM_ERR_END); } - if ((err = mp_exptmod(&t1, &two, prime, &t1)) != MP_OKAY) goto LBL_END; + if ((err = mp_exptmod(&t1, &two, prime, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); i++; } if (i == 0) { - if ((err = mp_copy(&R, ret)) != MP_OKAY) goto LBL_END; - err = MP_OKAY; - goto LBL_END; + if ((err = mp_copy(&R, ret)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); + goto LTM_ERR_END; } mp_set_i32(&t1, M - i - 1); - if ((err = mp_exptmod(&two, &t1, prime, &t1)) != MP_OKAY) goto LBL_END; + if ((err = mp_exptmod(&two, &t1, prime, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* t1 = 2 ^ (M - i - 1) */ - if ((err = mp_exptmod(&C, &t1, prime, &t1)) != MP_OKAY) goto LBL_END; + if ((err = mp_exptmod(&C, &t1, prime, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* t1 = C ^ (2 ^ (M - i - 1)) mod prime */ - if ((err = mp_sqrmod(&t1, prime, &C)) != MP_OKAY) goto LBL_END; + if ((err = mp_sqrmod(&t1, prime, &C)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* C = (t1 * t1) mod prime */ - if ((err = mp_mulmod(&R, &t1, prime, &R)) != MP_OKAY) goto LBL_END; + if ((err = mp_mulmod(&R, &t1, prime, &R)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* R = (R * t1) mod prime */ - if ((err = mp_mulmod(&T, &C, prime, &T)) != MP_OKAY) goto LBL_END; + if ((err = mp_mulmod(&T, &C, prime, &T)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_END); /* T = (T * C) mod prime */ M = i; /* M = i */ } -LBL_END: +LTM_ERR_END: mp_clear_multi(&t1, &C, &Q, &Z, &T, &R, &two, NULL); +LTM_ERR: return err; } diff --git a/mp_sub.c b/mp_sub.c index 1c95ad544..e9886fbe6 100644 --- a/mp_sub.c +++ b/mp_sub.c @@ -6,13 +6,15 @@ /* high level subtraction (handles signs) */ mp_err mp_sub(const mp_int *a, const mp_int *b, mp_int *c) { + mp_err err = MP_OKAY; if (a->sign != b->sign) { /* subtract a negative from a positive, OR */ /* subtract a positive from a negative. */ /* In either case, ADD their magnitudes, */ /* and use the sign of the first number. 
*/ c->sign = a->sign; - return s_mp_add(a, b, c); + if ((err = s_mp_add(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } /* subtract a positive from a positive, OR */ @@ -30,7 +32,10 @@ mp_err mp_sub(const mp_int *a, const mp_int *b, mp_int *c) /* Copy the sign from the first */ c->sign = a->sign; } - return s_mp_sub(a, b, c); + if ((err = s_mp_sub(a, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + +LTM_ERR: + return err; } #endif diff --git a/mp_sub_d.c b/mp_sub_d.c index b2b4d723d..7b4e7ebcd 100644 --- a/mp_sub_d.c +++ b/mp_sub_d.c @@ -6,7 +6,7 @@ /* single digit subtraction */ mp_err mp_sub_d(const mp_int *a, mp_digit b, mp_int *c) { - mp_err err; + mp_err err = MP_OKAY; int oldused; /* fast path for a == c */ @@ -24,17 +24,16 @@ mp_err mp_sub_d(const mp_int *a, mp_digit b, mp_int *c) } /* grow c as required */ - if ((err = mp_grow(c, a->used + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, a->used + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* if a is negative just do an unsigned * addition [with fudged signs] */ if (a->sign == MP_NEG) { mp_int a_ = *a; + a_.sign = MP_ZPOS; - err = mp_add_d(&a_, b, c); + if ((err = mp_add_d(&a_, b, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); c->sign = MP_NEG; /* clamp */ @@ -72,7 +71,9 @@ mp_err mp_sub_d(const mp_int *a, mp_digit b, mp_int *c) s_mp_zero_digs(c->dp + c->used, oldused - c->used); mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_submod.c b/mp_submod.c index 6e4d4f712..cba664776 100644 --- a/mp_submod.c +++ b/mp_submod.c @@ -6,10 +6,12 @@ /* d = a - b (mod c) */ mp_err mp_submod(const mp_int *a, const mp_int *b, const mp_int *c, mp_int *d) { - mp_err err; - if ((err = mp_sub(a, b, d)) != MP_OKAY) { - return err; - } - return mp_mod(d, c, d); + mp_err err = MP_OKAY; + + if ((err = mp_sub(a, b, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_mod(d, c, d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + +LTM_ERR: + return err; } #endif diff --git a/mp_to_radix.c b/mp_to_radix.c index 1e5e67110..6971d9fe4 100644 --- a/mp_to_radix.c +++ b/mp_to_radix.c @@ -22,17 +22,19 @@ static void s_reverse(char *s, size_t len) mp_err mp_to_radix(const mp_int *a, char *str, size_t maxlen, size_t *written, int radix) { size_t digs; - mp_err err; + mp_err err = MP_OKAY; mp_int t; mp_digit d; char *_s = str; /* check range of radix and size*/ if (maxlen < 2u) { - return MP_BUF; + err = MP_BUF; + MP_TRACE_ERROR(err, LTM_ERR); } if ((radix < 2) || (radix > 64)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* quick out if its zero */ @@ -42,12 +44,10 @@ mp_err mp_to_radix(const mp_int *a, char *str, size_t maxlen, size_t *written, i if (written != NULL) { *written = 2u; } - return MP_OKAY; + goto LTM_ERR; } - if ((err = mp_init_copy(&t, a)) != MP_OKAY) { - return err; - } + if ((err = mp_init_copy(&t, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* if it is negative output a - */ if (mp_isneg(&t)) { @@ -66,11 +66,9 @@ mp_err mp_to_radix(const mp_int *a, char *str, size_t maxlen, size_t *written, i if (--maxlen < 1u) { /* no more room */ err = MP_BUF; - goto LBL_ERR; - } - if ((err = mp_div_d(&t, (mp_digit)radix, &t, &d)) != MP_OKAY) { - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } + if ((err = mp_div_d(&t, (mp_digit)radix, &t, &d)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); *str++ = s_mp_radix_map[d]; ++digs; } @@ -87,8 +85,9 @@ mp_err mp_to_radix(const mp_int *a, char *str, size_t maxlen, size_t *written, i *written = mp_isneg(a) ? 
(digs + 1u): digs; } -LBL_ERR: +LTM_ERR_1: mp_clear(&t); +LTM_ERR: return err; } diff --git a/mp_to_sbin.c b/mp_to_sbin.c index 00884c3be..30ecc01b8 100644 --- a/mp_to_sbin.c +++ b/mp_to_sbin.c @@ -6,17 +6,19 @@ /* store in signed [big endian] format */ mp_err mp_to_sbin(const mp_int *a, uint8_t *buf, size_t maxlen, size_t *written) { - mp_err err; + mp_err err = MP_OKAY; + if (maxlen == 0u) { - return MP_BUF; - } - if ((err = mp_to_ubin(a, buf + 1, maxlen - 1u, written)) != MP_OKAY) { - return err; + err = MP_BUF; + MP_TRACE_ERROR(err, LTM_ERR); } + if ((err = mp_to_ubin(a, buf + 1, maxlen - 1u, written)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if (written != NULL) { (*written)++; } buf[0] = mp_isneg(a) ? (uint8_t)1 : (uint8_t)0; - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/mp_to_ubin.c b/mp_to_ubin.c index e8643cc07..8a3f860bb 100644 --- a/mp_to_ubin.c +++ b/mp_to_ubin.c @@ -7,31 +7,29 @@ mp_err mp_to_ubin(const mp_int *a, uint8_t *buf, size_t maxlen, size_t *written) { size_t x, count; - mp_err err; + mp_err err = MP_OKAY; mp_int t; count = mp_ubin_size(a); if (count > maxlen) { - return MP_BUF; + err = MP_BUF; + MP_TRACE_ERROR(err, LTM_ERR); } - if ((err = mp_init_copy(&t, a)) != MP_OKAY) { - return err; - } + if ((err = mp_init_copy(&t, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); for (x = count; x --> 0u;) { buf[x] = (uint8_t)(t.dp[0] & 255u); - if ((err = mp_div_2d(&t, 8, &t, NULL)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_div_2d(&t, 8, &t, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } if (written != NULL) { *written = count; } -LBL_ERR: +LTM_ERR_1: mp_clear(&t); +LTM_ERR: return err; } #endif diff --git a/mp_unpack.c b/mp_unpack.c index f0127fa44..70ebbb371 100644 --- a/mp_unpack.c +++ b/mp_unpack.c @@ -9,7 +9,7 @@ mp_err mp_unpack(mp_int *rop, size_t count, mp_order order, size_t size, mp_endian endian, size_t nails, const void *op) { - mp_err err; + mp_err err = MP_OKAY; size_t odd_nails, nail_bytes, i, j; uint8_t odd_nail_mask; @@ -32,10 +32,8 @@ mp_err mp_unpack(mp_int *rop, size_t count, mp_order order, size_t size, (((order == MP_MSB_FIRST) ? i : ((count - 1u) - i)) * size) + ((endian == MP_BIG_ENDIAN) ? (j + nail_bytes) : (((size - 1u) - j) - nail_bytes))); - if ((err = mp_mul_2d(rop, (j == 0u) ? (int)(8u - odd_nails) : 8, rop)) != MP_OKAY) { - return err; - } - + if ((err = mp_mul_2d(rop, (j == 0u) ? (int)(8u - odd_nails) : 8, rop)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); rop->dp[0] |= (j == 0u) ? (mp_digit)(byte & odd_nail_mask) : (mp_digit)byte; rop->used += 1; } @@ -43,7 +41,8 @@ mp_err mp_unpack(mp_int *rop, size_t count, mp_order order, size_t size, mp_clamp(rop); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/mp_xor.c b/mp_xor.c index ff264192c..a65f3b27e 100644 --- a/mp_xor.c +++ b/mp_xor.c @@ -7,13 +7,11 @@ mp_err mp_xor(const mp_int *a, const mp_int *b, mp_int *c) { int used = MP_MAX(a->used, b->used) + 1, i; - mp_err err; + mp_err err = MP_OKAY; mp_digit ac = 1, bc = 1, cc = 1; bool neg = (a->sign != b->sign); - if ((err = mp_grow(c, used)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); for (i = 0; i < used; i++) { mp_digit x, y; @@ -49,6 +47,8 @@ mp_err mp_xor(const mp_int *a, const mp_int *b, mp_int *c) c->used = used; c->sign = (neg ? 
MP_NEG : MP_ZPOS); mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_add.c b/s_mp_add.c index 2bda2fe1b..0e341dcb4 100644 --- a/s_mp_add.c +++ b/s_mp_add.c @@ -8,7 +8,7 @@ mp_err s_mp_add(const mp_int *a, const mp_int *b, mp_int *c) { int oldused, min, max, i; mp_digit u; - mp_err err; + mp_err err = MP_OKAY; /* find sizes, we let |a| <= |b| which means we have to sort * them. "x" will point to the input with the most digits @@ -21,9 +21,7 @@ mp_err s_mp_add(const mp_int *a, const mp_int *b, mp_int *c) max = a->used; /* init result */ - if ((err = mp_grow(c, max + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, max + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* get old used digit count and set new one */ oldused = c->used; @@ -65,6 +63,8 @@ mp_err s_mp_add(const mp_int *a, const mp_int *b, mp_int *c) s_mp_zero_digs(c->dp + c->used, oldused - c->used); mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_copy_digs.c b/s_mp_copy_digs.c index 4079c33a6..47f9338ab 100644 --- a/s_mp_copy_digs.c +++ b/s_mp_copy_digs.c @@ -11,6 +11,7 @@ void s_mp_copy_digs(mp_digit *d, const mp_digit *s, int digits) { #ifdef MP_USE_MEMOPS if (digits > 0) { + /* TODO: Make a note somewhere that "d" and "s" must not overlap or use memmove instead */ memcpy(d, s, (size_t)digits * sizeof(mp_digit)); } #else diff --git a/s_mp_div_3.c b/s_mp_div_3.c index 1cc6d3d8c..cdcc61be6 100644 --- a/s_mp_div_3.c +++ b/s_mp_div_3.c @@ -9,15 +9,13 @@ mp_err s_mp_div_3(const mp_int *a, mp_int *c, mp_digit *d) mp_int q; mp_word w; mp_digit b; - mp_err err; + mp_err err = MP_OKAY; int ix; /* b = 2**MP_DIGIT_BIT / 3 */ b = ((mp_word)1 << (mp_word)MP_DIGIT_BIT) / (mp_word)3; - if ((err = mp_init_size(&q, a->used)) != MP_OKAY) { - return err; - } + if ((err = mp_init_size(&q, a->used)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); q.used = a->used; q.sign = a->sign; @@ -58,7 +56,8 @@ mp_err s_mp_div_3(const mp_int *a, mp_int *c, mp_digit *d) } mp_clear(&q); - return MP_OKAY; +LTM_ERR: + return err; } #endif diff --git a/s_mp_div_recursive.c b/s_mp_div_recursive.c index d719c4e28..547f3b2dc 100644 --- a/s_mp_div_recursive.c +++ b/s_mp_div_recursive.c @@ -16,63 +16,64 @@ static mp_err s_recursion(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r) { - mp_err err; + mp_err err = MP_OKAY; mp_int A1, A2, B1, B0, Q1, Q0, R1, R0, t; int m = a->used - b->used, k = m/2; if (m < (MP_MUL_KARATSUBA_CUTOFF)) { - return s_mp_div_school(a, b, q, r); + if ((err = s_mp_div_school(a, b, q, r)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + return err; } - if ((err = mp_init_multi(&A1, &A2, &B1, &B0, &Q1, &Q0, &R1, &R0, &t, NULL)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_init_multi(&A1, &A2, &B1, &B0, &Q1, &Q0, &R1, &R0, &t, NULL)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); /* B1 = b / beta^k, B0 = b % beta^k*/ - if ((err = mp_div_2d(b, k * MP_DIGIT_BIT, &B1, &B0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2d(b, k * MP_DIGIT_BIT, &B1, &B0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* (Q1, R1) = RecursiveDivRem(A / beta^(2k), B1) */ - if ((err = mp_div_2d(a, 2*k * MP_DIGIT_BIT, &A1, &t)) != MP_OKAY) goto LBL_ERR; - if ((err = s_recursion(&A1, &B1, &Q1, &R1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2d(a, 2*k * MP_DIGIT_BIT, &A1, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = s_recursion(&A1, &B1, &Q1, &R1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* A1 = (R1 * beta^(2k)) + (A % beta^(2k)) - (Q1 * B0 * beta^k) */ - if ((err = mp_lshd(&R1, 
2*k)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&R1, &t, &A1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_mul(&Q1, &B0, &t)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_lshd(&t, k)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_sub(&A1, &t, &A1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_lshd(&R1, 2*k)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&R1, &t, &A1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_mul(&Q1, &B0, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_lshd(&t, k)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_sub(&A1, &t, &A1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* while A1 < 0 do Q1 = Q1 - 1, A1 = A1 + (beta^k * B) */ if (mp_cmp_d(&A1, 0uL) == MP_LT) { - if ((err = mp_mul_2d(b, k * MP_DIGIT_BIT, &t)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2d(b, k * MP_DIGIT_BIT, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); do { - if ((err = mp_decr(&Q1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&A1, &t, &A1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_decr(&Q1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&A1, &t, &A1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } while (mp_cmp_d(&A1, 0uL) == MP_LT); } /* (Q0, R0) = RecursiveDivRem(A1 / beta^(k), B1) */ - if ((err = mp_div_2d(&A1, k * MP_DIGIT_BIT, &A1, &t)) != MP_OKAY) goto LBL_ERR; - if ((err = s_recursion(&A1, &B1, &Q0, &R0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2d(&A1, k * MP_DIGIT_BIT, &A1, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = s_recursion(&A1, &B1, &Q0, &R0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* A2 = (R0*beta^k) + (A1 % beta^k) - (Q0*B0) */ - if ((err = mp_lshd(&R0, k)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&R0, &t, &A2)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_mul(&Q0, &B0, &t)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_sub(&A2, &t, &A2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_lshd(&R0, k)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&R0, &t, &A2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_mul(&Q0, &B0, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_sub(&A2, &t, &A2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* while A2 < 0 do Q0 = Q0 - 1, A2 = A2 + B */ while (mp_cmp_d(&A2, 0uL) == MP_LT) { - if ((err = mp_decr(&Q0)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&A2, b, &A2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_decr(&Q0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&A2, b, &A2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* return q = (Q1*beta^k) + Q0, r = A2 */ - if ((err = mp_lshd(&Q1, k)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&Q1, &Q0, q)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_lshd(&Q1, k)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&Q1, &Q0, q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_copy(&A2, r)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(&A2, r)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&A1, &A2, &B1, &B0, &Q1, &Q0, &R1, &R0, &t, NULL); +LTM_ERR: return err; } @@ -80,14 +81,13 @@ static mp_err s_recursion(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r mp_err s_mp_div_recursive(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r) { int j, m, n, sigma; - mp_err err; + mp_err err = MP_OKAY; bool neg; mp_digit msb_b, msb; mp_int A, B, Q, Q1, R, A_div, A_mod; - if ((err = mp_init_multi(&A, &B, &Q, &Q1, &R, &A_div, &A_mod, NULL)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = 
mp_init_multi(&A, &B, &Q, &Q1, &R, &A_div, &A_mod, NULL)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR); /* most significant bit of a limb */ /* assumes MP_DIGIT_MAX < (sizeof(mp_digit) * CHAR_BIT) */ @@ -99,12 +99,8 @@ mp_err s_mp_div_recursive(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r msb_b <<= 1; } /* Use that sigma to normalize B */ - if ((err = mp_mul_2d(b, sigma, &B)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_mul_2d(a, sigma, &A)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul_2d(b, sigma, &B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_mul_2d(a, sigma, &A)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* fix the sign */ neg = (a->sign != b->sign); @@ -122,22 +118,22 @@ mp_err s_mp_div_recursive(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r while (m > n) { /* (q, r) = RecursiveDivRem(A / (beta^(m-n)), B) */ j = (m - n) * MP_DIGIT_BIT; - if ((err = mp_div_2d(&A, j, &A_div, &A_mod)) != MP_OKAY) goto LBL_ERR; - if ((err = s_recursion(&A_div, &B, &Q1, &R)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2d(&A, j, &A_div, &A_mod)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = s_recursion(&A_div, &B, &Q1, &R)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* Q = (Q*beta!(n)) + q */ - if ((err = mp_mul_2d(&Q, n * MP_DIGIT_BIT, &Q)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&Q, &Q1, &Q)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2d(&Q, n * MP_DIGIT_BIT, &Q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&Q, &Q1, &Q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* A = (r * beta^(m-n)) + (A % beta^(m-n))*/ - if ((err = mp_mul_2d(&R, (m - n) * MP_DIGIT_BIT, &R)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&R, &A_mod, &A)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2d(&R, (m - n) * MP_DIGIT_BIT, &R)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&R, &A_mod, &A)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* m = m - n */ m = m - n; } /* (q, r) = RecursiveDivRem(A, B) */ - if ((err = s_recursion(&A, &B, &Q1, &R)) != MP_OKAY) goto LBL_ERR; + if ((err = s_recursion(&A, &B, &Q1, &R)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* Q = (Q * beta^m) + q, R = r */ - if ((err = mp_mul_2d(&Q, m * MP_DIGIT_BIT, &Q)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&Q, &Q1, &Q)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2d(&Q, m * MP_DIGIT_BIT, &Q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&Q, &Q1, &Q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* get sign before writing to c */ R.sign = (mp_iszero(&Q) ? 
MP_ZPOS : a->sign); @@ -148,11 +144,12 @@ mp_err s_mp_div_recursive(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r } if (r != NULL) { /* de-normalize the remainder */ - if ((err = mp_div_2d(&R, sigma, &R, NULL)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2d(&R, sigma, &R, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); mp_exch(&R, r); } -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&A, &B, &Q, &Q1, &R, &A_div, &A_mod, NULL); +LTM_ERR: return err; } diff --git a/s_mp_div_school.c b/s_mp_div_school.c index 304c7a9ff..c0e963bc4 100644 --- a/s_mp_div_school.c +++ b/s_mp_div_school.c @@ -22,17 +22,15 @@ mp_err s_mp_div_school(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) mp_digit xdpi; int n, t, i, norm; bool neg; - mp_err err; + mp_err err = MP_OKAY; - if ((err = mp_init_size(&q, a->used + 2)) != MP_OKAY) { - return err; - } + if ((err = mp_init_size(&q, a->used + 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); q.used = a->used + 2; - if ((err = mp_init(&t1)) != MP_OKAY) goto LBL_Q; - if ((err = mp_init(&t2)) != MP_OKAY) goto LBL_T1; - if ((err = mp_init_copy(&x, a)) != MP_OKAY) goto LBL_T2; - if ((err = mp_init_copy(&y, b)) != MP_OKAY) goto LBL_X; + if ((err = mp_init(&t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Q); + if ((err = mp_init(&t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_T1); + if ((err = mp_init_copy(&x, a)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_T2); + if ((err = mp_init_copy(&y, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X); /* fix the sign */ neg = (a->sign != b->sign); @@ -42,8 +40,8 @@ mp_err s_mp_div_school(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) norm = mp_count_bits(&y) % MP_DIGIT_BIT; if (norm < (MP_DIGIT_BIT - 1)) { norm = (MP_DIGIT_BIT - 1) - norm; - if ((err = mp_mul_2d(&x, norm, &x)) != MP_OKAY) goto LBL_Y; - if ((err = mp_mul_2d(&y, norm, &y)) != MP_OKAY) goto LBL_Y; + if ((err = mp_mul_2d(&x, norm, &x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); + if ((err = mp_mul_2d(&y, norm, &y)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); } else { norm = 0; } @@ -54,11 +52,11 @@ mp_err s_mp_div_school(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) /* while (x >= y*b**n-t) do { q[n-t] += 1; x -= y*b**{n-t} } */ /* y = y*b**{n-t} */ - if ((err = mp_lshd(&y, n - t)) != MP_OKAY) goto LBL_Y; + if ((err = mp_lshd(&y, n - t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); while (mp_cmp(&x, &y) != MP_LT) { ++(q.dp[n - t]); - if ((err = mp_sub(&x, &y, &x)) != MP_OKAY) goto LBL_Y; + if ((err = mp_sub(&x, &y, &x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); } /* reset y by shifting it back down */ @@ -101,7 +99,7 @@ mp_err s_mp_div_school(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) t1.dp[0] = ((t - 1) < 0) ? 0u : y.dp[t - 1]; t1.dp[1] = y.dp[t]; t1.used = 2; - if ((err = mp_mul_d(&t1, q.dp[(i - t) - 1], &t1)) != MP_OKAY) goto LBL_Y; + if ((err = mp_mul_d(&t1, q.dp[(i - t) - 1], &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); /* find right hand */ t2.dp[0] = ((i - 2) < 0) ? 
0u : x.dp[i - 2]; @@ -111,15 +109,15 @@ mp_err s_mp_div_school(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) } while (mp_cmp_mag(&t1, &t2) == MP_GT); /* step 3.3 x = x - q{i-t-1} * y * b**{i-t-1} */ - if ((err = mp_mul_d(&y, q.dp[(i - t) - 1], &t1)) != MP_OKAY) goto LBL_Y; - if ((err = mp_lshd(&t1, (i - t) - 1)) != MP_OKAY) goto LBL_Y; - if ((err = mp_sub(&x, &t1, &x)) != MP_OKAY) goto LBL_Y; + if ((err = mp_mul_d(&y, q.dp[(i - t) - 1], &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); + if ((err = mp_lshd(&t1, (i - t) - 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); + if ((err = mp_sub(&x, &t1, &x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); /* if x < 0 then { x = x + y*b**{i-t-1}; q{i-t-1} -= 1; } */ if (mp_isneg(&x)) { - if ((err = mp_copy(&y, &t1)) != MP_OKAY) goto LBL_Y; - if ((err = mp_lshd(&t1, (i - t) - 1)) != MP_OKAY) goto LBL_Y; - if ((err = mp_add(&x, &t1, &x)) != MP_OKAY) goto LBL_Y; + if ((err = mp_copy(&y, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); + if ((err = mp_lshd(&t1, (i - t) - 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); + if ((err = mp_add(&x, &t1, &x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); q.dp[(i - t) - 1] = (q.dp[(i - t) - 1] - 1uL) & MP_MASK; } @@ -139,20 +137,21 @@ mp_err s_mp_div_school(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) } if (d != NULL) { - if ((err = mp_div_2d(&x, norm, &x, NULL)) != MP_OKAY) goto LBL_Y; + if ((err = mp_div_2d(&x, norm, &x, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y); mp_exch(&x, d); } -LBL_Y: +LTM_ERR_Y: mp_clear(&y); -LBL_X: +LTM_ERR_X: mp_clear(&x); -LBL_T2: +LTM_ERR_T2: mp_clear(&t2); -LBL_T1: +LTM_ERR_T1: mp_clear(&t1); -LBL_Q: +LTM_ERR_Q: mp_clear(&q); +LTM_ERR: return err; } diff --git a/s_mp_div_small.c b/s_mp_div_small.c index 2d951be1c..0510e43de 100644 --- a/s_mp_div_small.c +++ b/s_mp_div_small.c @@ -9,27 +9,25 @@ mp_err s_mp_div_small(const mp_int *a, const mp_int *b, mp_int *c, mp_int *d) mp_int ta, tb, tq, q; int n; bool neg; - mp_err err; + mp_err err = MP_OKAY; /* init our temps */ - if ((err = mp_init_multi(&ta, &tb, &tq, &q, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&ta, &tb, &tq, &q, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); mp_set(&tq, 1uL); n = mp_count_bits(a) - mp_count_bits(b); - if ((err = mp_abs(a, &ta)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_abs(b, &tb)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_mul_2d(&tb, n, &tb)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_mul_2d(&tq, n, &tq)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_abs(a, &ta)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_abs(b, &tb)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_mul_2d(&tb, n, &tb)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_mul_2d(&tq, n, &tq)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); while (n-- >= 0) { if (mp_cmp(&tb, &ta) != MP_GT) { - if ((err = mp_sub(&ta, &tb, &ta)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&q, &tq, &q)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&ta, &tb, &ta)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&q, &tq, &q)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } - if ((err = mp_div_2d(&tb, 1, &tb, NULL)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_div_2d(&tq, 1, &tq, NULL)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2d(&tb, 1, &tb, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2d(&tq, 1, &tq, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* now q == quotient and ta == remainder */ @@ -43,8 +41,9 @@ mp_err s_mp_div_small(const 
mp_int *a, const mp_int *b, mp_int *c, mp_int *d) mp_exch(d, &ta); d->sign = (mp_iszero(d) ? MP_ZPOS : a->sign); } -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&ta, &tb, &tq, &q, NULL); +LTM_ERR: return err; } diff --git a/s_mp_exptmod.c b/s_mp_exptmod.c index 2a89a2cbf..669b7c99b 100644 --- a/s_mp_exptmod.c +++ b/s_mp_exptmod.c @@ -15,7 +15,7 @@ mp_err s_mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y { mp_int M[TAB_SIZE], res, mu; mp_digit buf; - mp_err err; + mp_err err = MP_OKAY; int bitbuf, bitcpy, bitcnt, mode, digidx, x, y, winsize; mp_err(*redux)(mp_int *x, const mp_int *m, const mp_int *mu); @@ -41,9 +41,7 @@ mp_err s_mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y /* init M array */ /* init first cell */ - if ((err = mp_init(&M[1])) != MP_OKAY) { - return err; - } + if ((err = mp_init(&M[1])) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* now init the second half of the array */ for (x = 1<<(winsize-1); x < (1 << winsize); x++) { @@ -57,13 +55,13 @@ mp_err s_mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y } /* create mu, used for Barrett reduction */ - if ((err = mp_init(&mu)) != MP_OKAY) goto LBL_M; + if ((err = mp_init(&mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_M); if (redmode == 0) { - if ((err = mp_reduce_setup(&mu, P)) != MP_OKAY) goto LBL_MU; + if ((err = mp_reduce_setup(&mu, P)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_MU); redux = mp_reduce; } else { - if ((err = mp_reduce_2k_setup_l(P, &mu)) != MP_OKAY) goto LBL_MU; + if ((err = mp_reduce_2k_setup_l(P, &mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_MU); redux = mp_reduce_2k_l; } @@ -75,32 +73,34 @@ mp_err s_mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y * The first half of the table is not * computed though accept for M[0] and M[1] */ - if ((err = mp_mod(G, P, &M[1])) != MP_OKAY) goto LBL_MU; + if ((err = mp_mod(G, P, &M[1])) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_MU); /* compute the value at M[1<<(winsize-1)] by squaring * M[1] (winsize-1) times */ - if ((err = mp_copy(&M[1], &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) goto LBL_MU; + if ((err = mp_copy(&M[1], &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_MU); for (x = 0; x < (winsize - 1); x++) { /* square it */ if ((err = mp_sqr(&M[(size_t)1 << (winsize - 1)], - &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) goto LBL_MU; + &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_MU); /* reduce modulo P */ - if ((err = redux(&M[(size_t)1 << (winsize - 1)], P, &mu)) != MP_OKAY) goto LBL_MU; + if ((err = redux(&M[(size_t)1 << (winsize - 1)], P, &mu)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_MU); } /* create upper table, that is M[x] = M[x-1] * M[1] (mod P) * for x = (2**(winsize - 1) + 1) to (2**winsize - 1) */ for (x = (1 << (winsize - 1)) + 1; x < (1 << winsize); x++) { - if ((err = mp_mul(&M[x - 1], &M[1], &M[x])) != MP_OKAY) goto LBL_MU; - if ((err = redux(&M[x], P, &mu)) != MP_OKAY) goto LBL_MU; + if ((err = mp_mul(&M[x - 1], &M[1], &M[x])) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_MU); + if ((err = redux(&M[x], P, &mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_MU); } /* setup result */ - if ((err = mp_init(&res)) != MP_OKAY) goto LBL_MU; + if ((err = mp_init(&res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_MU); mp_set(&res, 1uL); /* set initial mode and bit cnt */ @@ -138,8 +138,8 @@ mp_err s_mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y /* if the bit is zero and mode == 1 then we square */ if ((mode == 1) && (y 
== 0)) { - if ((err = mp_sqr(&res, &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, &mu)) != MP_OKAY) goto LBL_RES; + if ((err = mp_sqr(&res, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, &mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); continue; } @@ -151,13 +151,13 @@ mp_err s_mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y /* ok window is filled so square as required and multiply */ /* square first */ for (x = 0; x < winsize; x++) { - if ((err = mp_sqr(&res, &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, &mu)) != MP_OKAY) goto LBL_RES; + if ((err = mp_sqr(&res, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, &mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } /* then multiply */ - if ((err = mp_mul(&res, &M[bitbuf], &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, &mu)) != MP_OKAY) goto LBL_RES; + if ((err = mp_mul(&res, &M[bitbuf], &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, &mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); /* empty window and reset */ bitcpy = 0; @@ -170,29 +170,30 @@ mp_err s_mp_exptmod(const mp_int *G, const mp_int *X, const mp_int *P, mp_int *Y if ((mode == 2) && (bitcpy > 0)) { /* square then multiply if the bit is set */ for (x = 0; x < bitcpy; x++) { - if ((err = mp_sqr(&res, &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, &mu)) != MP_OKAY) goto LBL_RES; + if ((err = mp_sqr(&res, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, &mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); bitbuf <<= 1; if ((bitbuf & (1 << winsize)) != 0) { /* then multiply */ - if ((err = mp_mul(&res, &M[1], &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, &mu)) != MP_OKAY) goto LBL_RES; + if ((err = mp_mul(&res, &M[1], &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, &mu)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } } } mp_exch(&res, Y); - err = MP_OKAY; -LBL_RES: + +LTM_ERR_RES: mp_clear(&res); -LBL_MU: +LTM_ERR_MU: mp_clear(&mu); -LBL_M: +LTM_ERR_M: mp_clear(&M[1]); for (x = 1<<(winsize-1); x < (1 << winsize); x++) { mp_clear(&M[x]); } +LTM_ERR: return err; } #endif diff --git a/s_mp_exptmod_fast.c b/s_mp_exptmod_fast.c index e7729f49d..10abc1c1d 100644 --- a/s_mp_exptmod_fast.c +++ b/s_mp_exptmod_fast.c @@ -24,7 +24,7 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i mp_int M[TAB_SIZE], res; mp_digit buf, mp; int bitbuf, bitcpy, bitcnt, mode, digidx, x, y, winsize; - mp_err err; + mp_err err = MP_OKAY; /* use a pointer to the reduction algorithm. 
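    (Montgomery, Diminished-Radix, or 2^k-b reduction, selected further down to
     match the shape of the modulus P.)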
This allows us to use * one of many reduction algorithms without modding the guts of @@ -54,9 +54,7 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i /* init M array */ /* init first cell */ - if ((err = mp_init_size(&M[1], P->alloc)) != MP_OKAY) { - return err; - } + if ((err = mp_init_size(&M[1], P->alloc)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* now init the second half of the array */ for (x = 1<<(winsize-1); x < (1 << winsize); x++) { @@ -65,7 +63,7 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i mp_clear(&M[y]); } mp_clear(&M[1]); - return err; + MP_TRACE_ERROR(err, LTM_ERR); } } @@ -73,10 +71,10 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i if (redmode == 0) { if (MP_HAS(MP_MONTGOMERY_SETUP)) { /* now setup montgomery */ - if ((err = mp_montgomery_setup(P, &mp)) != MP_OKAY) goto LBL_M; + if ((err = mp_montgomery_setup(P, &mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_M); } else { err = MP_VAL; - goto LBL_M; + MP_TRACE_ERROR(err, LTM_ERR_M); } /* automatically pick the comba one if available (saves quite a few calls/ifs) */ @@ -89,7 +87,7 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i redux = mp_montgomery_reduce; } else { err = MP_VAL; - goto LBL_M; + MP_TRACE_ERROR(err, LTM_ERR_M); } } else if (redmode == 1) { if (MP_HAS(MP_DR_SETUP) && MP_HAS(MP_DR_REDUCE)) { @@ -98,19 +96,19 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i redux = mp_dr_reduce; } else { err = MP_VAL; - goto LBL_M; + MP_TRACE_ERROR(err, LTM_ERR_M); } } else if (MP_HAS(MP_REDUCE_2K_SETUP) && MP_HAS(MP_REDUCE_2K)) { /* setup DR reduction for moduli of the form 2**k - b */ - if ((err = mp_reduce_2k_setup(P, &mp)) != MP_OKAY) goto LBL_M; + if ((err = mp_reduce_2k_setup(P, &mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_M); redux = mp_reduce_2k; } else { err = MP_VAL; - goto LBL_M; + MP_TRACE_ERROR(err, LTM_ERR_M); } /* setup result */ - if ((err = mp_init_size(&res, P->alloc)) != MP_OKAY) goto LBL_M; + if ((err = mp_init_size(&res, P->alloc)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_M); /* create M table * @@ -122,31 +120,35 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i if (redmode == 0) { if (MP_HAS(MP_MONTGOMERY_CALC_NORMALIZATION)) { /* now we need R mod m */ - if ((err = mp_montgomery_calc_normalization(&res, P)) != MP_OKAY) goto LBL_RES; + if ((err = mp_montgomery_calc_normalization(&res, P)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_RES); /* now set M[1] to G * R mod m */ - if ((err = mp_mulmod(G, &res, P, &M[1])) != MP_OKAY) goto LBL_RES; + if ((err = mp_mulmod(G, &res, P, &M[1])) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } else { err = MP_VAL; - goto LBL_RES; + MP_TRACE_ERROR(err, LTM_ERR_RES); } } else { mp_set(&res, 1uL); - if ((err = mp_mod(G, P, &M[1])) != MP_OKAY) goto LBL_RES; + if ((err = mp_mod(G, P, &M[1])) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } /* compute the value at M[1<<(winsize-1)] by squaring M[1] (winsize-1) times */ - if ((err = mp_copy(&M[1], &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) goto LBL_RES; + if ((err = mp_copy(&M[1], &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_RES); for (x = 0; x < (winsize - 1); x++) { - if ((err = mp_sqr(&M[(size_t)1 << (winsize - 1)], &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) goto LBL_RES; - if ((err = redux(&M[(size_t)1 << (winsize - 1)], P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = 
mp_sqr(&M[(size_t)1 << (winsize - 1)], &M[(size_t)1 << (winsize - 1)])) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&M[(size_t)1 << (winsize - 1)], P, mp)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_RES); } /* create upper table */ for (x = (1 << (winsize - 1)) + 1; x < (1 << winsize); x++) { - if ((err = mp_mul(&M[x - 1], &M[1], &M[x])) != MP_OKAY) goto LBL_RES; - if ((err = redux(&M[x], P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = mp_mul(&M[x - 1], &M[1], &M[x])) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&M[x], P, mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } /* set initial mode and bit cnt */ @@ -184,8 +186,8 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i /* if the bit is zero and mode == 1 then we square */ if ((mode == 1) && (y == 0)) { - if ((err = mp_sqr(&res, &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = mp_sqr(&res, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); continue; } @@ -197,13 +199,13 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i /* ok window is filled so square as required and multiply */ /* square first */ for (x = 0; x < winsize; x++) { - if ((err = mp_sqr(&res, &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = mp_sqr(&res, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } /* then multiply */ - if ((err = mp_mul(&res, &M[bitbuf], &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = mp_mul(&res, &M[bitbuf], &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); /* empty window and reset */ bitcpy = 0; @@ -216,15 +218,15 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i if ((mode == 2) && (bitcpy > 0)) { /* square then multiply if the bit is set */ for (x = 0; x < bitcpy; x++) { - if ((err = mp_sqr(&res, &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = mp_sqr(&res, &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); /* get next bit of the window */ bitbuf <<= 1; if ((bitbuf & (1 << winsize)) != 0) { /* then multiply */ - if ((err = mp_mul(&res, &M[1], &res)) != MP_OKAY) goto LBL_RES; - if ((err = redux(&res, P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = mp_mul(&res, &M[1], &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); + if ((err = redux(&res, P, mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } } } @@ -236,19 +238,20 @@ mp_err s_mp_exptmod_fast(const mp_int *G, const mp_int *X, const mp_int *P, mp_i * to reduce one more time to cancel out the factor * of R. 
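    * Concretely R = beta^k (beta = 2^MP_DIGIT_BIT, k = number of limbs of P);
    * res started as R mod P and M[1] as G*R mod P, so the loop above left
    * res = (G^X)*R mod P, and this final Montgomery reduction multiplies by the
    * inverse of R, yielding G^X mod P.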
*/ - if ((err = redux(&res, P, mp)) != MP_OKAY) goto LBL_RES; + if ((err = redux(&res, P, mp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_RES); } /* swap res with Y */ mp_exch(&res, Y); - err = MP_OKAY; -LBL_RES: + +LTM_ERR_RES: mp_clear(&res); -LBL_M: +LTM_ERR_M: mp_clear(&M[1]); for (x = 1<<(winsize-1); x < (1 << winsize); x++) { mp_clear(&M[x]); } +LTM_ERR: return err; } #endif diff --git a/s_mp_fp_log.c b/s_mp_fp_log.c index 90a89abdf..d34ec1467 100644 --- a/s_mp_fp_log.c +++ b/s_mp_fp_log.c @@ -11,40 +11,39 @@ static mp_err s_mp_fp_log_fraction(const mp_int *a, int p, mp_int *c) int i; mp_err err; - if ((err = mp_init_multi(&b, &L_out, &twoep, &a_bar, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&b, &L_out, &twoep, &a_bar, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); L = mp_count_bits(a) - 1; pmL = (p < L) ? L - p: p - L; - if ((err = mp_mul_2d(a, pmL, &a_bar)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_2expt(&b, p - 1)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_mul_2d(a, pmL, &a_bar)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_2expt(&b, p - 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); mp_set_i32(&L_out, L); - if ((err = mp_mul_2d(&L_out, p, &L_out)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_mul_2d(&L_out, p, &L_out)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_2expt(&twoep, p + 1)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_2expt(&twoep, p + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); for (i = 0; i < p; i++) { - if ((err = mp_sqr(&a_bar, &a_bar)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_div_2d(&a_bar, p, &a_bar, NULL)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_sqr(&a_bar, &a_bar)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2d(&a_bar, p, &a_bar, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); if (mp_cmp(&a_bar, &twoep) != MP_LT) { - if ((err = mp_div_2(&a_bar, &a_bar)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_add(&L_out, &b, &L_out)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_div_2(&a_bar, &a_bar)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&L_out, &b, &L_out)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } - if ((err = mp_div_2(&b, &b)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_div_2(&b, &b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } mp_exch(c, &L_out); -LTM_ERR: +LTM_ERR_1: mp_clear_multi(&b, &L_out, &twoep, &a_bar, NULL); +LTM_ERR: return err; } mp_err s_mp_fp_log(const mp_int *a, mp_int *c) { mp_int La, t; - mp_err err; + mp_err err = MP_OKAY; int fla; /* We have arbitrary precision here and could adapt "prec" to actual precision, @@ -56,26 +55,23 @@ mp_err s_mp_fp_log(const mp_int *a, mp_int *c) fla = mp_count_bits(a) - 1; - if ((err = mp_init_multi(&La, &t, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&La, &t, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if (fla > prec) { - if ((err = mp_div_2d(a, fla - prec, &t, NULL)) != MP_OKAY) goto LTM_ERR; - if ((err = s_mp_fp_log_fraction(&t, prec, - &La)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_div_2d(a, fla - prec, &t, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = s_mp_fp_log_fraction(&t, prec, &La)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); mp_set_i32(&t,fla - prec); - if ((err = mp_mul_2d(&t,prec, &t)) != MP_OKAY) goto LTM_ERR; - if ((err = mp_add(&La, &t, &La)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_mul_2d(&t,prec, &t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&La, &t, &La)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else { - if ((err = 
s_mp_fp_log_fraction(a, prec, - &La)) != MP_OKAY) goto LTM_ERR; + if ((err = s_mp_fp_log_fraction(a, prec, &La)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } mp_exch(&La, c); -LTM_ERR: +LTM_ERR_1: mp_clear_multi(&La, &t, NULL); +LTM_ERR: return err; } diff --git a/s_mp_fp_log_d.c b/s_mp_fp_log_d.c index 71d82dc41..e03913601 100644 --- a/s_mp_fp_log_d.c +++ b/s_mp_fp_log_d.c @@ -16,12 +16,14 @@ static mp_word s_mp_flog2_mp_word_d(mp_word value) static mp_err s_mp_fp_log_fraction_d(mp_word x, int p, mp_word *c) { mp_word b, L_out, L, a_bar, twoep; + mp_err err = MP_OKAY; int i; L = s_mp_flog2_mp_word_d(x); if ((L + (mp_word)p) > MP_UPPER_LIMIT_FIXED_LOG) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } a_bar = ((mp_word)p < L) ? x << (L - (mp_word)p) : x << ((mp_word)p - L); @@ -39,13 +41,15 @@ static mp_err s_mp_fp_log_fraction_d(mp_word x, int p, mp_word *c) b >>= 1u; } *c = L_out; - return MP_OKAY; + +LTM_ERR: + return err; } /* Approximate the base two logarithm of "a" */ mp_err s_mp_fp_log_d(const mp_int *a, mp_word *c) { - mp_err err; + mp_err err = MP_OKAY; int la; int prec = MP_PRECISION_FIXED_LOG; mp_word tmp, la_word; @@ -55,28 +59,27 @@ mp_err s_mp_fp_log_d(const mp_int *a, mp_word *c) /* We don't use the whole number, just the most significant "prec" bits */ if (la > prec) { - if ((err = mp_init(&t)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_init(&t)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* Get enough msb-bits for the chosen precision */ - if ((err = mp_div_2d(a, la - prec, &t, NULL)) != MP_OKAY) goto LTM_ERR; + if ((err = mp_div_2d(a, la - prec, &t, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); tmp = mp_get_u64(&t); /* Compute the low precision approximation for the fractional part */ - if ((err = s_mp_fp_log_fraction_d(tmp, prec, &la_word)) != MP_OKAY) goto LTM_ERR; + if ((err = s_mp_fp_log_fraction_d(tmp, prec, &la_word)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* Compute the integer part and add it */ tmp = ((mp_word)(la - prec))<sign == MP_NEG) || mp_iszero(b)) { - return MP_VAL; - } + if ((b->sign == MP_NEG) || mp_iszero(b)) MP_TRACE_ERROR(err, LTM_ERR); /* init temps */ - if ((err = mp_init_multi(&x, &y, &u, &v, - &A, &B, &C, &D, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&x, &y, &u, &v, &A, &B, &C, &D, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* x = a, y = b */ - if ((err = mp_mod(a, b, &x)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_copy(b, &y)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mod(a, b, &x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_copy(b, &y)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* 2. [modified] if x,y are both even then return an error! */ if (mp_iseven(&x) && mp_iseven(&y)) { err = MP_VAL; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } /* 3. u=x, v=y, A=1, B=0, C=0,D=1 */ - if ((err = mp_copy(&x, &u)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_copy(&y, &v)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(&x, &u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_copy(&y, &v)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); mp_set(&A, 1uL); mp_set(&D, 1uL); @@ -40,50 +35,50 @@ mp_err s_mp_invmod(const mp_int *a, const mp_int *b, mp_int *c) /* 4. 
while u is even do */ while (mp_iseven(&u)) { /* 4.1 u = u/2 */ - if ((err = mp_div_2(&u, &u)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&u, &u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* 4.2 if A or B is odd then */ if (mp_isodd(&A) || mp_isodd(&B)) { /* A = (A+y)/2, B = (B-x)/2 */ - if ((err = mp_add(&A, &y, &A)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_sub(&B, &x, &B)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&A, &y, &A)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_sub(&B, &x, &B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* A = A/2, B = B/2 */ - if ((err = mp_div_2(&A, &A)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_div_2(&B, &B)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&A, &A)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2(&B, &B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* 5. while v is even do */ while (mp_iseven(&v)) { /* 5.1 v = v/2 */ - if ((err = mp_div_2(&v, &v)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&v, &v)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* 5.2 if C or D is odd then */ if (mp_isodd(&C) || mp_isodd(&D)) { /* C = (C+y)/2, D = (D-x)/2 */ - if ((err = mp_add(&C, &y, &C)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_sub(&D, &x, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&C, &y, &C)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_sub(&D, &x, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* C = C/2, D = D/2 */ - if ((err = mp_div_2(&C, &C)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_div_2(&D, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&C, &C)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2(&D, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* 6. if u >= v then */ if (mp_cmp(&u, &v) != MP_LT) { /* u = u - v, A = A - C, B = B - D */ - if ((err = mp_sub(&u, &v, &u)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&u, &v, &u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_sub(&A, &C, &A)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&A, &C, &A)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_sub(&B, &D, &B)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&B, &D, &B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else { /* v - v - u, C = C - A, D = D - B */ - if ((err = mp_sub(&v, &u, &v)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&v, &u, &v)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_sub(&C, &A, &C)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&C, &A, &C)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_sub(&D, &B, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&D, &B, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* if not zero goto step 4 */ @@ -94,24 +89,25 @@ mp_err s_mp_invmod(const mp_int *a, const mp_int *b, mp_int *c) /* if v != 1 then there is no inverse */ if (mp_cmp_d(&v, 1uL) != MP_EQ) { err = MP_VAL; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } /* if its too low */ while (mp_isneg(&C)) { - if ((err = mp_add(&C, b, &C)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&C, b, &C)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* too big */ while (mp_cmp_mag(&C, b) != MP_LT) { - if ((err = mp_sub(&C, b, &C)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&C, b, &C)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* C is now the inverse */ mp_exch(&C, c); -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&x, &y, &u, &v, &A, &B, &C, &D, NULL); +LTM_ERR: return err; } #endif diff --git a/s_mp_invmod_odd.c b/s_mp_invmod_odd.c index 11fc357dc..d9ecc1980 100644 --- 
a/s_mp_invmod_odd.c +++ b/s_mp_invmod_odd.c @@ -12,74 +12,73 @@ mp_err s_mp_invmod_odd(const mp_int *a, const mp_int *b, mp_int *c) { mp_int x, y, u, v, B, D; - mp_err err; + mp_err err = MP_OKAY; /* 2. [modified] b must be odd */ if (mp_iseven(b)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* init all our temps */ - if ((err = mp_init_multi(&x, &y, &u, &v, &B, &D, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&x, &y, &u, &v, &B, &D, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* x == modulus, y == value to invert */ - if ((err = mp_copy(b, &x)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(b, &x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* y needs to be positive but the remainder d of mp_div(a,b,c,d) might be negative */ - if ((err = mp_mod(a, b, &y)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mod(a, b, &y)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* if one of x,y is zero return an error! */ if (mp_iszero(&x) || mp_iszero(&y)) { err = MP_VAL; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } /* 3. u=x, v=y, A=1, B=0, C=0,D=1 */ - if ((err = mp_copy(&x, &u)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_copy(&y, &v)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_copy(&x, &u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_copy(&y, &v)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); mp_set(&D, 1uL); do { /* 4. while u is even do */ while (mp_iseven(&u)) { /* 4.1 u = u/2 */ - if ((err = mp_div_2(&u, &u)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&u, &u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* 4.2 if B is odd then */ if (mp_isodd(&B)) { - if ((err = mp_sub(&B, &x, &B)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&B, &x, &B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* B = B/2 */ - if ((err = mp_div_2(&B, &B)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&B, &B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* 5. while v is even do */ while (mp_iseven(&v)) { /* 5.1 v = v/2 */ - if ((err = mp_div_2(&v, &v)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&v, &v)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* 5.2 if D is odd then */ if (mp_isodd(&D)) { /* D = (D-x)/2 */ - if ((err = mp_sub(&D, &x, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&D, &x, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* D = D/2 */ - if ((err = mp_div_2(&D, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&D, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* 6. 
if u >= v then */ if (mp_cmp(&u, &v) != MP_LT) { /* u = u - v, B = B - D */ - if ((err = mp_sub(&u, &v, &u)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&u, &v, &u)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_sub(&B, &D, &B)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&B, &D, &B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } else { /* v - v - u, D = D - B */ - if ((err = mp_sub(&v, &u, &v)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&v, &u, &v)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); - if ((err = mp_sub(&D, &B, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&D, &B, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* if not zero goto step 4 */ @@ -90,24 +89,24 @@ mp_err s_mp_invmod_odd(const mp_int *a, const mp_int *b, mp_int *c) /* if v != 1 then there is no inverse */ if (mp_cmp_d(&v, 1uL) != MP_EQ) { err = MP_VAL; - goto LBL_ERR; + MP_TRACE_ERROR(err, LTM_ERR_1); } /* b is now the inverse */ while (mp_isneg(&D)) { - if ((err = mp_add(&D, b, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&D, b, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* too big */ while (mp_cmp_mag(&D, b) != MP_LT) { - if ((err = mp_sub(&D, b, &D)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&D, b, &D)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } mp_exch(&D, c); - err = MP_OKAY; -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&x, &y, &u, &v, &B, &D, NULL); +LTM_ERR: return err; } #endif diff --git a/s_mp_montgomery_reduce_comba.c b/s_mp_montgomery_reduce_comba.c index 3858f75a0..82cf4ce4a 100644 --- a/s_mp_montgomery_reduce_comba.c +++ b/s_mp_montgomery_reduce_comba.c @@ -14,14 +14,15 @@ mp_err s_mp_montgomery_reduce_comba(mp_int *x, const mp_int *n, mp_digit rho) { int ix, oldused; - mp_err err; + mp_err err = MP_OKAY; mp_word MP_ALLOC_WARRAY(W); MP_CHECK_WARRAY(W); if (x->used > MP_WARRAY) { MP_FREE_WARRAY(W); - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* get old used count */ @@ -30,7 +31,7 @@ mp_err s_mp_montgomery_reduce_comba(mp_int *x, const mp_int *n, mp_digit rho) /* grow a as required */ if ((err = mp_grow(x, n->used + 1)) != MP_OKAY) { MP_FREE_WARRAY(W); - return err; + MP_TRACE_ERROR(err, LTM_ERR); } /* first we have to get the digits of the input into @@ -117,8 +118,10 @@ mp_err s_mp_montgomery_reduce_comba(mp_int *x, const mp_int *n, mp_digit rho) MP_FREE_WARRAY(W); /* if A >= m then A = A - m */ if (mp_cmp_mag(x, n) != MP_LT) { - return s_mp_sub(x, n, x); + if ((err = s_mp_sub(x, n, x)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); } - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_mul.c b/s_mp_mul.c index 3394c142e..75b15cff0 100644 --- a/s_mp_mul.c +++ b/s_mp_mul.c @@ -10,22 +10,22 @@ mp_err s_mp_mul(const mp_int *a, const mp_int *b, mp_int *c, int digs) { mp_int t; - mp_err err; + mp_err err = MP_OKAY; int pa, ix; if (digs < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* can we use the fast multiplier? 
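      (The Comba multiplier accumulates each output column in a single mp_word,
       so it is only used while digs stays below MP_WARRAY and the smaller operand
       has fewer than MP_MAX_COMBA digits, which keeps the column sums from
       overflowing.)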
*/ if ((digs < MP_WARRAY) && (MP_MIN(a->used, b->used) < MP_MAX_COMBA)) { - return s_mp_mul_comba(a, b, c, digs); - } - - if ((err = mp_init_size(&t, digs)) != MP_OKAY) { + if ((err = s_mp_mul_comba(a, b, c, digs)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); return err; } + + if ((err = mp_init_size(&t, digs)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); t.used = digs; /* compute the digits of the product directly */ @@ -60,6 +60,8 @@ mp_err s_mp_mul(const mp_int *a, const mp_int *b, mp_int *c, int digs) mp_exch(&t, c); mp_clear(&t); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_mul_balance.c b/s_mp_mul_balance.c index f36f0d30b..6e65d5d54 100644 --- a/s_mp_mul_balance.c +++ b/s_mp_mul_balance.c @@ -7,17 +7,15 @@ mp_err s_mp_mul_balance(const mp_int *a, const mp_int *b, mp_int *c) { mp_int a0, tmp, r; - mp_err err; + mp_err err = MP_OKAY; int i, j, nblocks = MP_MAX(a->used, b->used) / MP_MIN(a->used, b->used), bsize = MP_MIN(a->used, b->used); - if ((err = mp_init_size(&a0, bsize + 2)) != MP_OKAY) { - return err; - } + if ((err = mp_init_size(&a0, bsize + 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); if ((err = mp_init_multi(&tmp, &r, NULL)) != MP_OKAY) { mp_clear(&a0); - return err; + MP_TRACE_ERROR(err, LTM_ERR); } /* Make sure that A is the larger one*/ @@ -33,17 +31,11 @@ mp_err s_mp_mul_balance(const mp_int *a, const mp_int *b, mp_int *c) mp_clamp(&a0); /* Multiply with b */ - if ((err = mp_mul(&a0, b, &tmp)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul(&a0, b, &tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* Shift tmp to the correct position */ - if ((err = mp_lshd(&tmp, bsize * i)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_lshd(&tmp, bsize * i)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); /* Add to output. 
No carry needed */ - if ((err = mp_add(&r, &tmp, &r)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_add(&r, &tmp, &r)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } /* The left-overs; there are always left-overs */ if (j < a->used) { @@ -52,20 +44,15 @@ mp_err s_mp_mul_balance(const mp_int *a, const mp_int *b, mp_int *c) j += a0.used; mp_clamp(&a0); - if ((err = mp_mul(&a0, b, &tmp)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_lshd(&tmp, bsize * i)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_add(&r, &tmp, &r)) != MP_OKAY) { - goto LBL_ERR; - } + if ((err = mp_mul(&a0, b, &tmp)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_lshd(&tmp, bsize * i)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_add(&r, &tmp, &r)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); } mp_exch(&r,c); -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&a0, &tmp, &r,NULL); +LTM_ERR: return err; } #endif diff --git a/s_mp_mul_comba.c b/s_mp_mul_comba.c index 5b37035ea..e9ac7c397 100644 --- a/s_mp_mul_comba.c +++ b/s_mp_mul_comba.c @@ -22,7 +22,7 @@ mp_err s_mp_mul_comba(const mp_int *a, const mp_int *b, mp_int *c, int digs) { int oldused, pa, ix; - mp_err err; + mp_err err = MP_OKAY; mp_digit MP_ALLOC_WARRAY(W); mp_word _W; @@ -30,12 +30,14 @@ mp_err s_mp_mul_comba(const mp_int *a, const mp_int *b, mp_int *c, int digs) if (digs < 0) { MP_FREE_WARRAY(W); - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* grow the destination as required */ if ((err = mp_grow(c, digs)) != MP_OKAY) { MP_FREE_WARRAY(W); + MP_TRACE_ERROR(err, LTM_ERR); return err; } @@ -82,6 +84,8 @@ mp_err s_mp_mul_comba(const mp_int *a, const mp_int *b, mp_int *c, int digs) mp_clamp(c); MP_FREE_WARRAY(W); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_mul_high.c b/s_mp_mul_high.c index fd532ebd8..564520796 100644 --- a/s_mp_mul_high.c +++ b/s_mp_mul_high.c @@ -10,22 +10,22 @@ mp_err s_mp_mul_high(const mp_int *a, const mp_int *b, mp_int *c, int digs) { mp_int t; int pa, pb, ix; - mp_err err; + mp_err err = MP_OKAY; if (digs < 0) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* can we use the fast multiplier? 
*/ if (MP_HAS(S_MP_MUL_HIGH_COMBA) && ((a->used + b->used + 1) < MP_WARRAY) && (MP_MIN(a->used, b->used) < MP_MAX_COMBA)) { - return s_mp_mul_high_comba(a, b, c, digs); - } - - if ((err = mp_init_size(&t, a->used + b->used + 1)) != MP_OKAY) { + if ((err = s_mp_mul_high_comba(a, b, c, digs)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); return err; } + + if ((err = mp_init_size(&t, a->used + b->used + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); t.used = a->used + b->used + 1; pa = a->used; @@ -51,6 +51,8 @@ mp_err s_mp_mul_high(const mp_int *a, const mp_int *b, mp_int *c, int digs) mp_clamp(&t); mp_exch(&t, c); mp_clear(&t); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_mul_high_comba.c b/s_mp_mul_high_comba.c index b0096d4e6..18ab9889c 100644 --- a/s_mp_mul_high_comba.c +++ b/s_mp_mul_high_comba.c @@ -15,7 +15,7 @@ mp_err s_mp_mul_high_comba(const mp_int *a, const mp_int *b, mp_int *c, int digs) { int oldused, pa, ix; - mp_err err; + mp_err err = MP_OKAY; mp_digit MP_ALLOC_WARRAY(W); mp_word _W; @@ -23,14 +23,15 @@ mp_err s_mp_mul_high_comba(const mp_int *a, const mp_int *b, mp_int *c, int digs if (digs < 0) { MP_FREE_WARRAY(W); - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } /* grow the destination as required */ pa = a->used + b->used; if ((err = mp_grow(c, pa)) != MP_OKAY) { MP_FREE_WARRAY(W); - return err; + MP_TRACE_ERROR(err, LTM_ERR); } /* number of output digits to produce */ @@ -74,6 +75,8 @@ mp_err s_mp_mul_high_comba(const mp_int *a, const mp_int *b, mp_int *c, int digs mp_clamp(c); MP_FREE_WARRAY(W); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_mul_karatsuba.c b/s_mp_mul_karatsuba.c index b46529837..b257c88da 100644 --- a/s_mp_mul_karatsuba.c +++ b/s_mp_mul_karatsuba.c @@ -36,7 +36,7 @@ mp_err s_mp_mul_karatsuba(const mp_int *a, const mp_int *b, mp_int *c) { mp_int x0, x1, y0, y1, t1, x0y0, x1y1; int B; - mp_err err; + mp_err err= MP_OKAY;; /* min # of digits */ B = MP_MIN(a->used, b->used); @@ -45,29 +45,15 @@ mp_err s_mp_mul_karatsuba(const mp_int *a, const mp_int *b, mp_int *c) B = B >> 1; /* init copy all the temps */ - if ((err = mp_init_size(&x0, B)) != MP_OKAY) { - goto LBL_ERR; - } - if ((err = mp_init_size(&x1, a->used - B)) != MP_OKAY) { - goto X0; - } - if ((err = mp_init_size(&y0, B)) != MP_OKAY) { - goto X1; - } - if ((err = mp_init_size(&y1, b->used - B)) != MP_OKAY) { - goto Y0; - } + if ((err = mp_init_size(&x0, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_init_size(&x1, a->used - B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X0); + if ((err = mp_init_size(&y0, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1); + if ((err = mp_init_size(&y1, b->used - B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y0); /* init temps */ - if ((err = mp_init_size(&t1, B * 2)) != MP_OKAY) { - goto Y1; - } - if ((err = mp_init_size(&x0y0, B * 2)) != MP_OKAY) { - goto T1; - } - if ((err = mp_init_size(&x1y1, B * 2)) != MP_OKAY) { - goto X0Y0; - } + if ((err = mp_init_size(&t1, B * 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_Y1); + if ((err = mp_init_size(&x0y0, B * 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_T1); + if ((err = mp_init_size(&x1y1, B * 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X0Y0); /* now shift the digits */ x0.used = y0.used = B; @@ -90,62 +76,51 @@ mp_err s_mp_mul_karatsuba(const mp_int *a, const mp_int *b, mp_int *c) /* now calc the products x0y0 and x1y1 */ /* after this x0 is no longer required, free temp [x0==t2]! 
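      (Karatsuba splits the operands at B limbs and uses
       (x1*2^k + x0)*(y1*2^k + y0) = x1y1*2^(2k) + ((x1+x0)*(y1+y0) - x1y1 - x0y0)*2^k + x0y0,
       with 2^k the shift by B limbs, so only the three half-sized products
       x0y0, x1y1 and (x1+x0)*(y1+y0) are computed below.)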
*/ - if ((err = mp_mul(&x0, &y0, &x0y0)) != MP_OKAY) { - goto X1Y1; /* x0y0 = x0*y0 */ - } - if ((err = mp_mul(&x1, &y1, &x1y1)) != MP_OKAY) { - goto X1Y1; /* x1y1 = x1*y1 */ - } + /* x0y0 = x0*y0 */ + if ((err = mp_mul(&x0, &y0, &x0y0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1Y1); + /* x1y1 = x1*y1 */ + if ((err = mp_mul(&x1, &y1, &x1y1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1Y1); /* now calc x1+x0 and y1+y0 */ - if ((err = s_mp_add(&x1, &x0, &t1)) != MP_OKAY) { - goto X1Y1; /* t1 = x1 - x0 */ - } - if ((err = s_mp_add(&y1, &y0, &x0)) != MP_OKAY) { - goto X1Y1; /* t2 = y1 - y0 */ - } - if ((err = mp_mul(&t1, &x0, &t1)) != MP_OKAY) { - goto X1Y1; /* t1 = (x1 + x0) * (y1 + y0) */ - } + /* t1 = x1 - x0 */ + if ((err = s_mp_add(&x1, &x0, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1Y1); + /* t2 = y1 - y0 */ + if ((err = s_mp_add(&y1, &y0, &x0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1Y1); + /* t1 = (x1 + x0) * (y1 + y0) */ + if ((err = mp_mul(&t1, &x0, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1Y1); /* add x0y0 */ - if ((err = mp_add(&x0y0, &x1y1, &x0)) != MP_OKAY) { - goto X1Y1; /* t2 = x0y0 + x1y1 */ - } - if ((err = s_mp_sub(&t1, &x0, &t1)) != MP_OKAY) { - goto X1Y1; /* t1 = (x1+x0)*(y1+y0) - (x1y1 + x0y0) */ - } + /* t2 = x0y0 + x1y1 */ + if ((err = mp_add(&x0y0, &x1y1, &x0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1Y1); + /* t1 = (x1+x0)*(y1+y0) - (x1y1 + x0y0) */ + if ((err = s_mp_sub(&t1, &x0, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1Y1); /* shift by B */ - if ((err = mp_lshd(&t1, B)) != MP_OKAY) { - goto X1Y1; /* t1 = (x0y0 + x1y1 - (x1-x0)*(y1-y0))<used, b->used) / 3; /** a = a2 * x^2 + a1 * x + a0; */ - if ((err = mp_init_size(&a0, B)) != MP_OKAY) goto LBL_ERRa0; - if ((err = mp_init_size(&a1, B)) != MP_OKAY) goto LBL_ERRa1; - if ((err = mp_init_size(&a2, a->used - 2 * B)) != MP_OKAY) goto LBL_ERRa2; + if ((err = mp_init_size(&a0, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_a0); + if ((err = mp_init_size(&a1, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_a1); + if ((err = mp_init_size(&a2, a->used - 2 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_a2); a0.used = a1.used = B; a2.used = a->used - 2 * B; @@ -58,9 +56,9 @@ mp_err s_mp_mul_toom(const mp_int *a, const mp_int *b, mp_int *c) mp_clamp(&a2); /** b = b2 * x^2 + b1 * x + b0; */ - if ((err = mp_init_size(&b0, B)) != MP_OKAY) goto LBL_ERRb0; - if ((err = mp_init_size(&b1, B)) != MP_OKAY) goto LBL_ERRb1; - if ((err = mp_init_size(&b2, b->used - 2 * B)) != MP_OKAY) goto LBL_ERRb2; + if ((err = mp_init_size(&b0, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_b0); + if ((err = mp_init_size(&b1, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_b1); + if ((err = mp_init_size(&b2, b->used - 2 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_b2); b0.used = b1.used = B; b2.used = b->used - 2 * B; @@ -73,129 +71,130 @@ mp_err s_mp_mul_toom(const mp_int *a, const mp_int *b, mp_int *c) /** \\ S1 = (a2+a1+a0) * (b2+b1+b0); */ /** T1 = a2 + a1; */ - if ((err = mp_add(&a2, &a1, &T1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&a2, &a1, &T1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S2 = T1 + a0; */ - if ((err = mp_add(&T1, &a0, &S2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&T1, &a0, &S2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** c = b2 + b1; */ - if ((err = mp_add(&b2, &b1, c)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&b2, &b1, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S1 = c + b0; */ - if ((err = mp_add(c, &b0, &S1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(c, &b0, &S1)) != MP_OKAY) 
MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S1 = S1 * S2; */ - if ((err = mp_mul(&S1, &S2, &S1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&S1, &S2, &S1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S2 = (4*a2+2*a1+a0) * (4*b2+2*b1+b0); */ /** T1 = T1 + a2; */ - if ((err = mp_add(&T1, &a2, &T1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&T1, &a2, &T1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** T1 = T1 << 1; */ - if ((err = mp_mul_2(&T1, &T1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2(&T1, &T1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** T1 = T1 + a0; */ - if ((err = mp_add(&T1, &a0, &T1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&T1, &a0, &T1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** c = c + b2; */ - if ((err = mp_add(c, &b2, c)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(c, &b2, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** c = c << 1; */ - if ((err = mp_mul_2(c, c)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2(c, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** c = c + b0; */ - if ((err = mp_add(c, &b0, c)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(c, &b0, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S2 = T1 * c; */ - if ((err = mp_mul(&T1, c, &S2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&T1, c, &S2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S3 = (a2-a1+a0) * (b2-b1+b0); */ /** a1 = a2 - a1; */ - if ((err = mp_sub(&a2, &a1, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&a2, &a1, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a1 = a1 + a0; */ - if ((err = mp_add(&a1, &a0, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&a1, &a0, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** b1 = b2 - b1; */ - if ((err = mp_sub(&b2, &b1, &b1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&b2, &b1, &b1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** b1 = b1 + b0; */ - if ((err = mp_add(&b1, &b0, &b1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&b1, &b0, &b1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a1 = a1 * b1; */ - if ((err = mp_mul(&a1, &b1, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&a1, &b1, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** b1 = a2 * b2; */ - if ((err = mp_mul(&a2, &b2, &b1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&a2, &b2, &b1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S2 = (S2 - S3)/3; */ /** S2 = S2 - a1; */ - if ((err = mp_sub(&S2, &a1, &S2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&S2, &a1, &S2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S2 = S2 / 3; \\ this is an exact division */ - if ((err = s_mp_div_3(&S2, &S2, NULL)) != MP_OKAY) goto LBL_ERR; + if ((err = s_mp_div_3(&S2, &S2, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a1 = S1 - a1; */ - if ((err = mp_sub(&S1, &a1, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&S1, &a1, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a1 = a1 >> 1; */ - if ((err = mp_div_2(&a1, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&a1, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a0 = a0 * b0; */ - if ((err = mp_mul(&a0, &b0, &a0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&a0, &b0, &a0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S1 = S1 - a0; */ - if ((err = mp_sub(&S1, &a0, &S1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&S1, &a0, &S1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S2 = S2 - S1; */ - if ((err = mp_sub(&S2, &S1, &S2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&S2, &S1, &S2)) != 
MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S2 = S2 >> 1; */ - if ((err = mp_div_2(&S2, &S2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(&S2, &S2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S1 = S1 - a1; */ - if ((err = mp_sub(&S1, &a1, &S1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&S1, &a1, &S1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S1 = S1 - b1; */ - if ((err = mp_sub(&S1, &b1, &S1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&S1, &b1, &S1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** T1 = b1 << 1; */ - if ((err = mp_mul_2(&b1, &T1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2(&b1, &T1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** S2 = S2 - T1; */ - if ((err = mp_sub(&S2, &T1, &S2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&S2, &T1, &S2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a1 = a1 - S2; */ - if ((err = mp_sub(&a1, &S2, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&a1, &S2, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** P = b1*x^4+ S2*x^3+ S1*x^2+ a1*x + a0; */ - if ((err = mp_lshd(&b1, 4 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_lshd(&S2, 3 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&b1, &S2, &b1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_lshd(&S1, 2 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&b1, &S1, &b1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_lshd(&a1, 1 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&b1, &a1, &b1)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&b1, &a0, c)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_lshd(&b1, 4 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_lshd(&S2, 3 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(&b1, &S2, &b1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_lshd(&S1, 2 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(&b1, &S1, &b1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_lshd(&a1, 1 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(&b1, &a1, &b1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(&b1, &a0, c)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a * b - P */ -LBL_ERR: +LTM_ERR_ALL: mp_clear(&b2); -LBL_ERRb2: +LTM_ERR_b2: mp_clear(&b1); -LBL_ERRb1: +LTM_ERR_b1: mp_clear(&b0); -LBL_ERRb0: +LTM_ERR_b0: mp_clear(&a2); -LBL_ERRa2: +LTM_ERR_a2: mp_clear(&a1); -LBL_ERRa1: +LTM_ERR_a1: mp_clear(&a0); -LBL_ERRa0: +LTM_ERR_a0: mp_clear_multi(&S1, &S2, &T1, NULL); +LTM_ERR: return err; } diff --git a/s_mp_prime_is_divisible.c b/s_mp_prime_is_divisible.c index 63b2405ab..320a72fe8 100644 --- a/s_mp_prime_is_divisible.c +++ b/s_mp_prime_is_divisible.c @@ -11,23 +11,23 @@ mp_err s_mp_prime_is_divisible(const mp_int *a, bool *result) { int i; + mp_err err = MP_OKAY; for (i = 0; i < MP_PRIME_TAB_SIZE; i++) { /* what is a mod LBL_prime_tab[i] */ - mp_err err; mp_digit res; - if ((err = mp_mod_d(a, s_mp_prime_tab[i], &res)) != MP_OKAY) { - return err; - } + if ((err = mp_mod_d(a, s_mp_prime_tab[i], &res)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* is the residue zero? 
*/ if (res == 0u) { *result = true; - return MP_OKAY; + return err; } } /* default to not */ *result = false; - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_radix_size_overestimate.c b/s_mp_radix_size_overestimate.c index 4f0599732..4c952ec60 100644 --- a/s_mp_radix_size_overestimate.c +++ b/s_mp_radix_size_overestimate.c @@ -42,7 +42,8 @@ mp_err s_mp_radix_size_overestimate(const mp_int *a, const int radix, size_t *si mp_err err = MP_OKAY; if ((radix < 2) || (radix > 64)) { - return MP_VAL; + err = MP_VAL; + MP_TRACE_ERROR(err, LTM_ERR); } if (mp_iszero(a)) { @@ -56,9 +57,7 @@ mp_err s_mp_radix_size_overestimate(const mp_int *a, const int radix, size_t *si return MP_OKAY; } - if ((err = mp_init_multi(&bi_bit_count, &bi_k, NULL)) != MP_OKAY) { - return err; - } + if ((err = mp_init_multi(&bi_bit_count, &bi_k, NULL)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* la = floor(log_2(a)) + 1 */ bit_count = mp_count_bits(a); @@ -67,15 +66,17 @@ mp_err s_mp_radix_size_overestimate(const mp_int *a, const int radix, size_t *si /* k = floor(2^29/log_2(radix)) + 1 */ mp_set_u32(&bi_k, s_log_bases[radix]); /* n = floor((la * k) / 2^29) + 1 */ - if ((err = mp_mul(&bi_bit_count, &bi_k, &bi_bit_count)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_div_2d(&bi_bit_count, MP_RADIX_SIZE_SCALE, &bi_bit_count, NULL)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&bi_bit_count, &bi_k, &bi_bit_count)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_1); + if ((err = mp_div_2d(&bi_bit_count, MP_RADIX_SIZE_SCALE, &bi_bit_count, NULL)) != MP_OKAY) + MP_TRACE_ERROR(err, LTM_ERR_1); /* The "+1" here is the "+1" in "floor((la * k) / 2^29) + 1" */ /* n = n + 1 + EOS + sign */ *size = (size_t)(mp_get_u64(&bi_bit_count) + 3U); -LBL_ERR: +LTM_ERR_1: mp_clear_multi(&bi_bit_count, &bi_k, NULL); +LTM_ERR: return err; } diff --git a/s_mp_rand_platform.c b/s_mp_rand_platform.c index 0a6982a55..5473fe87b 100644 --- a/s_mp_rand_platform.c +++ b/s_mp_rand_platform.c @@ -3,6 +3,8 @@ /* LibTomMath, multiple-precision integer library -- Tom St Denis */ /* SPDX-License-Identifier: Unlicense */ +/* TODO: it is a bit more complicated to fiddle the tracing in, so it is left for later */ + /* First the OS-specific special cases * - *BSD * - Windows diff --git a/s_mp_sqr.c b/s_mp_sqr.c index da9aa69ce..997cd9b3f 100644 --- a/s_mp_sqr.c +++ b/s_mp_sqr.c @@ -8,12 +8,10 @@ mp_err s_mp_sqr(const mp_int *a, mp_int *b) { mp_int t; int ix, pa; - mp_err err; + mp_err err = MP_OKAY; pa = a->used; - if ((err = mp_init_size(&t, (2 * pa) + 1)) != MP_OKAY) { - return err; - } + if ((err = mp_init_size(&t, (2 * pa) + 1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); /* default used is maximum possible size */ t.used = (2 * pa) + 1; @@ -73,6 +71,8 @@ mp_err s_mp_sqr(const mp_int *a, mp_int *b) mp_clamp(&t); mp_exch(&t, b); mp_clear(&t); - return MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_sqr_comba.c b/s_mp_sqr_comba.c index 336a0a082..d93cf0fcc 100644 --- a/s_mp_sqr_comba.c +++ b/s_mp_sqr_comba.c @@ -18,7 +18,7 @@ mp_err s_mp_sqr_comba(const mp_int *a, mp_int *b) int oldused, pa, ix; mp_digit MP_ALLOC_WARRAY(W); mp_word W1; - mp_err err; + mp_err err = MP_OKAY; MP_CHECK_WARRAY(W); @@ -26,7 +26,7 @@ mp_err s_mp_sqr_comba(const mp_int *a, mp_int *b) pa = a->used + a->used; if ((err = mp_grow(b, pa)) != MP_OKAY) { MP_FREE_WARRAY(W); - return err; + MP_TRACE_ERROR(err, LTM_ERR); } /* number of output digits to produce */ @@ -86,6 +86,8 @@ mp_err s_mp_sqr_comba(const mp_int *a, mp_int *b) mp_clamp(b); MP_FREE_WARRAY(W); - return 
MP_OKAY; + +LTM_ERR: + return err; } #endif diff --git a/s_mp_sqr_karatsuba.c b/s_mp_sqr_karatsuba.c index f064b46a1..193d4e60d 100644 --- a/s_mp_sqr_karatsuba.c +++ b/s_mp_sqr_karatsuba.c @@ -14,7 +14,7 @@ mp_err s_mp_sqr_karatsuba(const mp_int *a, mp_int *b) { mp_int x0, x1, t1, t2, x0x0, x1x1; int B; - mp_err err; + mp_err err = MP_OKAY; /* min # of digits */ B = a->used; @@ -23,20 +23,14 @@ mp_err s_mp_sqr_karatsuba(const mp_int *a, mp_int *b) B = B >> 1; /* init copy all the temps */ - if ((err = mp_init_size(&x0, B)) != MP_OKAY) - goto LBL_ERR; - if ((err = mp_init_size(&x1, a->used - B)) != MP_OKAY) - goto X0; + if ((err = mp_init_size(&x0, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); + if ((err = mp_init_size(&x1, a->used - B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X0); /* init temps */ - if ((err = mp_init_size(&t1, a->used * 2)) != MP_OKAY) - goto X1; - if ((err = mp_init_size(&t2, a->used * 2)) != MP_OKAY) - goto T1; - if ((err = mp_init_size(&x0x0, B * 2)) != MP_OKAY) - goto T2; - if ((err = mp_init_size(&x1x1, (a->used - B) * 2)) != MP_OKAY) - goto X0X0; + if ((err = mp_init_size(&t1, a->used * 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1); + if ((err = mp_init_size(&t2, a->used * 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_T1); + if ((err = mp_init_size(&x0x0, B * 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_T2); + if ((err = mp_init_size(&x1x1, (a->used - B) * 2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X0X0); /* now shift the digits */ x0.used = B; @@ -46,47 +40,46 @@ mp_err s_mp_sqr_karatsuba(const mp_int *a, mp_int *b) mp_clamp(&x0); /* now calc the products x0*x0 and x1*x1 */ - if ((err = mp_sqr(&x0, &x0x0)) != MP_OKAY) - goto X1X1; /* x0x0 = x0*x0 */ - if ((err = mp_sqr(&x1, &x1x1)) != MP_OKAY) - goto X1X1; /* x1x1 = x1*x1 */ + /* x0x0 = x0*x0 */ + if ((err = mp_sqr(&x0, &x0x0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1X1); + /* x1x1 = x1*x1 */ + if ((err = mp_sqr(&x1, &x1x1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1X1); /* now calc (x1+x0)**2 */ - if ((err = s_mp_add(&x1, &x0, &t1)) != MP_OKAY) - goto X1X1; /* t1 = x1 - x0 */ - if ((err = mp_sqr(&t1, &t1)) != MP_OKAY) - goto X1X1; /* t1 = (x1 - x0) * (x1 - x0) */ + /* t1 = x1 - x0 */ + if ((err = s_mp_add(&x1, &x0, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1X1); + /* t1 = (x1 - x0) * (x1 - x0) */ + if ((err = mp_sqr(&t1, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1X1); /* add x0y0 */ - if ((err = s_mp_add(&x0x0, &x1x1, &t2)) != MP_OKAY) - goto X1X1; /* t2 = x0x0 + x1x1 */ - if ((err = s_mp_sub(&t1, &t2, &t1)) != MP_OKAY) - goto X1X1; /* t1 = (x1+x0)**2 - (x0x0 + x1x1) */ + /* t2 = x0x0 + x1x1 */ + if ((err = s_mp_add(&x0x0, &x1x1, &t2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1X1); + /* t1 = (x1+x0)**2 - (x0x0 + x1x1) */ + if ((err = s_mp_sub(&t1, &t2, &t1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_X1X1); /* shift by B */ - if ((err = mp_lshd(&t1, B)) != MP_OKAY) - goto X1X1; /* t1 = (x0x0 + x1x1 - (x1-x0)*(x1-x0))<used / 3; /** a = a2 * x^2 + a1 * x + a0; */ - if ((err = mp_init_size(&a0, B)) != MP_OKAY) goto LBL_ERRa0; - if ((err = mp_init_size(&a1, B)) != MP_OKAY) goto LBL_ERRa1; - if ((err = mp_init_size(&a2, a->used - (2 * B))) != MP_OKAY) goto LBL_ERRa2; + if ((err = mp_init_size(&a0, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_a0); + if ((err = mp_init_size(&a1, B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_a1); + if ((err = mp_init_size(&a2, a->used - (2 * B))) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_a2); a0.used = a1.used = B; a2.used = a->used - 2 * B; @@ -47,86 +45,86 @@ mp_err 
s_mp_sqr_toom(const mp_int *a, mp_int *b) mp_clamp(&a2); /** S0 = a0^2; */ - if ((err = mp_sqr(&a0, &S0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sqr(&a0, &S0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S1 = (a2 + a1 + a0)^2 */ /** \\S2 = (a2 - a1 + a0)^2 */ /** \\S1 = a0 + a2; */ /** a0 = a0 + a2; */ - if ((err = mp_add(&a0, &a2, &a0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&a0, &a2, &a0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S2 = S1 - a1; */ /** b = a0 - a1; */ - if ((err = mp_sub(&a0, &a1, b)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&a0, &a1, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S1 = S1 + a1; */ /** a0 = a0 + a1; */ - if ((err = mp_add(&a0, &a1, &a0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&a0, &a1, &a0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S1 = S1^2; */ /** a0 = a0^2; */ - if ((err = mp_sqr(&a0, &a0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sqr(&a0, &a0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S2 = S2^2; */ /** b = b^2; */ - if ((err = mp_sqr(b, b)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sqr(b, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\ S3 = 2 * a1 * a2 */ /** \\S3 = a1 * a2; */ /** a1 = a1 * a2; */ - if ((err = mp_mul(&a1, &a2, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul(&a1, &a2, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S3 = S3 << 1; */ /** a1 = a1 << 1; */ - if ((err = mp_mul_2(&a1, &a1)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_mul_2(&a1, &a1)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S4 = a2^2; */ /** a2 = a2^2; */ - if ((err = mp_sqr(&a2, &a2)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sqr(&a2, &a2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\ tmp = (S1 + S2)/2 */ /** \\tmp = S1 + S2; */ /** b = a0 + b; */ - if ((err = mp_add(&a0, b, b)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_add(&a0, b, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\tmp = tmp >> 1; */ /** b = b >> 1; */ - if ((err = mp_div_2(b, b)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_div_2(b, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\ S1 = S1 - tmp - S3 */ /** \\S1 = S1 - tmp; */ /** a0 = a0 - b; */ - if ((err = mp_sub(&a0, b, &a0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&a0, b, &a0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S1 = S1 - S3; */ /** a0 = a0 - a1; */ - if ((err = mp_sub(&a0, &a1, &a0)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(&a0, &a1, &a0)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S2 = tmp - S4 -S0 */ /** \\S2 = tmp - S4; */ /** b = b - a2; */ - if ((err = mp_sub(b, &a2, b)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(b, &a2, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\S2 = S2 - S0; */ /** b = b - S0; */ - if ((err = mp_sub(b, &S0, b)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_sub(b, &S0, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** \\P = S4*x^4 + S3*x^3 + S2*x^2 + S1*x + S0; */ /** P = a2*x^4 + a1*x^3 + b*x^2 + a0*x + S0; */ - if ((err = mp_lshd(&a2, 4 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_lshd(&a1, 3 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_lshd(b, 2 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_lshd(&a0, 1 * B)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&a2, &a1, &a2)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(&a2, b, b)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(b, &a0, b)) != MP_OKAY) goto LBL_ERR; - if ((err = mp_add(b, &S0, b)) != MP_OKAY) goto LBL_ERR; + if ((err = mp_lshd(&a2, 4 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + 
if ((err = mp_lshd(&a1, 3 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_lshd(b, 2 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_lshd(&a0, 1 * B)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(&a2, &a1, &a2)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(&a2, b, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(b, &a0, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); + if ((err = mp_add(b, &S0, b)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR_ALL); /** a^2 - P */ -LBL_ERR: +LTM_ERR_ALL: mp_clear(&a2); -LBL_ERRa2: +LTM_ERR_a2: mp_clear(&a1); -LBL_ERRa1: +LTM_ERR_a1: mp_clear(&a0); -LBL_ERRa0: +LTM_ERR_a0: mp_clear(&S0); - +LTM_ERR: return err; }
diff --git a/s_mp_sub.c b/s_mp_sub.c index b1a749e35..95b2648be 100644 --- a/s_mp_sub.c +++ b/s_mp_sub.c @@ -8,12 +8,10 @@ mp_err s_mp_sub(const mp_int *a, const mp_int *b, mp_int *c) { int oldused = c->used, min = b->used, max = a->used, i; mp_digit u; - mp_err err; + mp_err err = MP_OKAY; /* init result */ - if ((err = mp_grow(c, max)) != MP_OKAY) { - return err; - } + if ((err = mp_grow(c, max)) != MP_OKAY) MP_TRACE_ERROR(err, LTM_ERR); c->used = max; @@ -50,7 +48,9 @@ mp_err s_mp_sub(const mp_int *a, const mp_int *b, mp_int *c) s_mp_zero_digs(c->dp + c->used, oldused - c->used); mp_clamp(c); - return MP_OKAY; + +LTM_ERR: + return err; } #endif
diff --git a/tommath_private.h b/tommath_private.h index be620dbc9..5b1f637d9 100644 --- a/tommath_private.h +++ b/tommath_private.h @@ -8,6 +8,17 @@ #include "tommath_class.h" #include <limits.h> + +#ifdef MP_ADD_ERROR_TRACING +/* Not everything is supported everywhere but some (e.g.: filename and linenumber) are */ +#define MP_TRACE_ERROR(ERROR_NUMBER, ERROR_GOTO) do{\ + fprintf(stderr, "In %s at %d in %s: %s\n", __FILE__, __LINE__, __FUNCTION__, mp_error_to_string((ERROR_NUMBER)));\ + goto ERROR_GOTO;\ + }while(0) +#else +#define MP_TRACE_ERROR(ERROR_NUMBER, ERROR_GOTO) goto ERROR_GOTO +#endif + /* * Private symbols * --------------- From d678bbca5c93745da06bfa78d26dd749d76cfcd3 Mon Sep 17 00:00:00 2001 From: czurnieden Date: Wed, 15 May 2024 02:46:15 +0200 Subject: [PATCH 2/2] Removed unreachable code --- s_mp_mul_comba.c | 1 - 1 file changed, 1 deletion(-) diff --git a/s_mp_mul_comba.c b/s_mp_mul_comba.c index e9ac7c397..609b2362a 100644 --- a/s_mp_mul_comba.c +++ b/s_mp_mul_comba.c @@ -38,7 +38,6 @@ mp_err s_mp_mul_comba(const mp_int *a, const mp_int *b, mp_int *c, int digs) if ((err = mp_grow(c, digs)) != MP_OKAY) { MP_FREE_WARRAY(W); MP_TRACE_ERROR(err, LTM_ERR); - return err; } /* number of output digits to produce */
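
Illustrative usage (not part of the patches above): a minimal caller sketch, assuming the library was built with -DMP_ADD_ERROR_TRACING and that the MP_VAL path in mp_read_radix() goes through the traced macro; the file name and line number mentioned in the comment are hypothetical, the message text comes from mp_error_to_string().

#include <stdio.h>
#include <stdlib.h>
#include "tommath.h"

int main(void)
{
   mp_int a;
   mp_err err;

   if (mp_init(&a) != MP_OKAY) {
      return EXIT_FAILURE;
   }

   /* Radix 1 is invalid, so this call fails with MP_VAL.  With the tracing
    * macro compiled in, a line roughly of the form
    *    In mp_read_radix.c at <line> in mp_read_radix: Value out of range
    * is printed to stderr before the error is handed back to the caller. */
   if ((err = mp_read_radix(&a, "123", 1)) != MP_OKAY) {
      fprintf(stderr, "caller still sees: %s\n", mp_error_to_string(err));
   }

   mp_clear(&a);
   return EXIT_SUCCESS;
}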