diff --git a/dev/aarch64_clean/meta.h b/dev/aarch64_clean/meta.h index 9f4a01835..72f746459 100644 --- a/dev/aarch64_clean/meta.h +++ b/dev/aarch64_clean/meta.h @@ -32,6 +32,7 @@ #if !defined(__ASSEMBLER__) +#include #include "../api.h" #include "src/arith_native_aarch64.h" @@ -49,9 +50,8 @@ static MLD_INLINE int mld_intt_native(int32_t data[MLDSA_N]) return MLD_NATIVE_FUNC_SUCCESS; } -static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, - const uint8_t *buf, - unsigned buflen) +static MLD_INLINE int mld_rej_uniform_native(int32_t *r, int len, + const uint8_t *buf, int buflen) { if (len != MLDSA_N || buflen % 24 != 0) /* NEON support is mandatory for AArch64 */ @@ -59,15 +59,15 @@ static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, return MLD_NATIVE_FUNC_FALLBACK; } - /* Safety: outlen is at most MLDSA_N, hence, this cast is safe. */ + /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. */ return (int)mld_rej_uniform_asm(r, buf, buflen, mld_rej_uniform_table); } -static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AArch64 implementation assumes specific buffer lengths */ if (len != MLDSA_N || buflen != MLD_AARCH64_REJ_UNIFORM_ETA2_BUFLEN) { @@ -82,16 +82,16 @@ static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta2_asm(r, buf, buflen, mld_rej_uniform_eta_table); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. 
*/ return (int)outlen; } -static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AArch64 implementation assumes specific buffer lengths */ if (len != MLDSA_N || buflen != MLD_AARCH64_REJ_UNIFORM_ETA4_BUFLEN) { @@ -106,7 +106,7 @@ static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta4_asm(r, buf, buflen, mld_rej_uniform_eta_table); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. */ return (int)outlen; } diff --git a/dev/aarch64_clean/src/arith_native_aarch64.h b/dev/aarch64_clean/src/arith_native_aarch64.h index 22f193585..b3bd326ef 100644 --- a/dev/aarch64_clean/src/arith_native_aarch64.h +++ b/dev/aarch64_clean/src/arith_native_aarch64.h @@ -56,16 +56,16 @@ void mld_ntt_asm(int32_t *, const int32_t *, const int32_t *); void mld_intt_asm(int32_t *, const int32_t *, const int32_t *); #define mld_rej_uniform_asm MLD_NAMESPACE(rej_uniform_asm) -uint64_t mld_rej_uniform_asm(int32_t *r, const uint8_t *buf, unsigned buflen, +uint64_t mld_rej_uniform_asm(int32_t *r, const uint8_t *buf, int32_t buflen, const uint8_t *table); #define mld_rej_uniform_eta2_asm MLD_NAMESPACE(rej_uniform_eta2_asm) -unsigned mld_rej_uniform_eta2_asm(int32_t *r, const uint8_t *buf, - unsigned buflen, const uint8_t *table); +int32_t mld_rej_uniform_eta2_asm(int32_t *r, const uint8_t *buf, int32_t buflen, + const uint8_t *table); #define mld_rej_uniform_eta4_asm MLD_NAMESPACE(rej_uniform_eta4_asm) -unsigned mld_rej_uniform_eta4_asm(int32_t *r, const uint8_t *buf, - unsigned buflen, const uint8_t *table); +int32_t mld_rej_uniform_eta4_asm(int32_t *r, const uint8_t *buf, int32_t buflen, 
+ const uint8_t *table); #define mld_poly_decompose_32_asm MLD_NAMESPACE(poly_decompose_32_asm) void mld_poly_decompose_32_asm(int32_t *a1, int32_t *a0, const int32_t *a); diff --git a/dev/aarch64_opt/meta.h b/dev/aarch64_opt/meta.h index 9f4a01835..72f746459 100644 --- a/dev/aarch64_opt/meta.h +++ b/dev/aarch64_opt/meta.h @@ -32,6 +32,7 @@ #if !defined(__ASSEMBLER__) +#include #include "../api.h" #include "src/arith_native_aarch64.h" @@ -49,9 +50,8 @@ static MLD_INLINE int mld_intt_native(int32_t data[MLDSA_N]) return MLD_NATIVE_FUNC_SUCCESS; } -static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, - const uint8_t *buf, - unsigned buflen) +static MLD_INLINE int mld_rej_uniform_native(int32_t *r, int len, + const uint8_t *buf, int buflen) { if (len != MLDSA_N || buflen % 24 != 0) /* NEON support is mandatory for AArch64 */ @@ -59,15 +59,15 @@ static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, return MLD_NATIVE_FUNC_FALLBACK; } - /* Safety: outlen is at most MLDSA_N, hence, this cast is safe. */ + /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. */ return (int)mld_rej_uniform_asm(r, buf, buflen, mld_rej_uniform_table); } -static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AArch64 implementation assumes specific buffer lengths */ if (len != MLDSA_N || buflen != MLD_AARCH64_REJ_UNIFORM_ETA2_BUFLEN) { @@ -82,16 +82,16 @@ static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta2_asm(r, buf, buflen, mld_rej_uniform_eta_table); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. 
*/ return (int)outlen; } -static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AArch64 implementation assumes specific buffer lengths */ if (len != MLDSA_N || buflen != MLD_AARCH64_REJ_UNIFORM_ETA4_BUFLEN) { @@ -106,7 +106,7 @@ static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta4_asm(r, buf, buflen, mld_rej_uniform_eta_table); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. */ return (int)outlen; } diff --git a/dev/aarch64_opt/src/arith_native_aarch64.h b/dev/aarch64_opt/src/arith_native_aarch64.h index 22f193585..b3bd326ef 100644 --- a/dev/aarch64_opt/src/arith_native_aarch64.h +++ b/dev/aarch64_opt/src/arith_native_aarch64.h @@ -56,16 +56,16 @@ void mld_ntt_asm(int32_t *, const int32_t *, const int32_t *); void mld_intt_asm(int32_t *, const int32_t *, const int32_t *); #define mld_rej_uniform_asm MLD_NAMESPACE(rej_uniform_asm) -uint64_t mld_rej_uniform_asm(int32_t *r, const uint8_t *buf, unsigned buflen, +uint64_t mld_rej_uniform_asm(int32_t *r, const uint8_t *buf, int32_t buflen, const uint8_t *table); #define mld_rej_uniform_eta2_asm MLD_NAMESPACE(rej_uniform_eta2_asm) -unsigned mld_rej_uniform_eta2_asm(int32_t *r, const uint8_t *buf, - unsigned buflen, const uint8_t *table); +int32_t mld_rej_uniform_eta2_asm(int32_t *r, const uint8_t *buf, int32_t buflen, + const uint8_t *table); #define mld_rej_uniform_eta4_asm MLD_NAMESPACE(rej_uniform_eta4_asm) -unsigned mld_rej_uniform_eta4_asm(int32_t *r, const uint8_t *buf, - unsigned buflen, const uint8_t *table); +int32_t mld_rej_uniform_eta4_asm(int32_t *r, const uint8_t *buf, int32_t buflen, + const 
uint8_t *table); #define mld_poly_decompose_32_asm MLD_NAMESPACE(poly_decompose_32_asm) void mld_poly_decompose_32_asm(int32_t *a1, int32_t *a0, const int32_t *a); diff --git a/dev/x86_64/meta.h b/dev/x86_64/meta.h index 7978443f4..553b311bc 100644 --- a/dev/x86_64/meta.h +++ b/dev/x86_64/meta.h @@ -31,6 +31,7 @@ #define MLD_USE_NATIVE_POLYVECL_POINTWISE_ACC_MONTGOMERY_L7 #if !defined(__ASSEMBLER__) +#include #include #include "../../common.h" #include "../api.h" @@ -64,9 +65,8 @@ static MLD_INLINE int mld_intt_native(int32_t data[MLDSA_N]) return MLD_NATIVE_FUNC_SUCCESS; } -static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, - const uint8_t *buf, - unsigned buflen) +static MLD_INLINE int mld_rej_uniform_native(int32_t *r, int len, + const uint8_t *buf, int buflen) { /* AVX2 implementation assumes specific buffer lengths */ if (!mld_sys_check_capability(MLD_SYS_CAP_AVX2) || len != MLDSA_N || @@ -79,11 +79,11 @@ static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, return (int)mld_rej_uniform_avx2(r, buf); } -static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AVX2 implementation assumes specific buffer lengths */ if (!mld_sys_check_capability(MLD_SYS_CAP_AVX2) || len != MLDSA_N || buflen != MLD_AVX2_REJ_UNIFORM_ETA2_BUFLEN) @@ -100,16 +100,16 @@ static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta2_avx2(r, buf); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. 
*/ return (int)outlen; } -static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AVX2 implementation assumes specific buffer lengths */ if (!mld_sys_check_capability(MLD_SYS_CAP_AVX2) || len != MLDSA_N || buflen != MLD_AVX2_REJ_UNIFORM_ETA4_BUFLEN) @@ -126,7 +126,7 @@ static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta4_avx2(r, buf); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. */ return (int)outlen; } diff --git a/dev/x86_64/src/arith_native_x86_64.h b/dev/x86_64/src/arith_native_x86_64.h index fd59a21e0..1d15eb6f5 100644 --- a/dev/x86_64/src/arith_native_x86_64.h +++ b/dev/x86_64/src/arith_native_x86_64.h @@ -43,15 +43,15 @@ void mld_invntt_avx2(__m256i *r, const __m256i *mld_qdata); void mld_nttunpack_avx2(__m256i *r); #define mld_rej_uniform_avx2 MLD_NAMESPACE(mld_rej_uniform_avx2) -unsigned mld_rej_uniform_avx2(int32_t *r, - const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]); +int32_t mld_rej_uniform_avx2(int32_t *r, + const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]); #define mld_rej_uniform_eta2_avx2 MLD_NAMESPACE(mld_rej_uniform_eta2_avx2) -unsigned mld_rej_uniform_eta2_avx2( +int32_t mld_rej_uniform_eta2_avx2( int32_t *r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA2_BUFLEN]); #define mld_rej_uniform_eta4_avx2 MLD_NAMESPACE(mld_rej_uniform_eta4_avx2) -unsigned mld_rej_uniform_eta4_avx2( +int32_t mld_rej_uniform_eta4_avx2( int32_t *r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA4_BUFLEN]); #define mld_poly_decompose_32_avx2 MLD_NAMESPACE(mld_poly_decompose_32_avx2) diff --git a/dev/x86_64/src/rej_uniform_avx2.c b/dev/x86_64/src/rej_uniform_avx2.c 
index 07a5df970..6c7243bd3 100644 --- a/dev/x86_64/src/rej_uniform_avx2.c +++ b/dev/x86_64/src/rej_uniform_avx2.c @@ -36,8 +36,8 @@ * frontend to perform the unintuitive padding. */ -unsigned int mld_rej_uniform_avx2( - int32_t *MLD_RESTRICT r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]) +int32_t mld_rej_uniform_avx2(int32_t *MLD_RESTRICT r, + const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]) { unsigned int ctr, pos; uint32_t good; @@ -116,7 +116,7 @@ unsigned int mld_rej_uniform_avx2( } } - return ctr; + return (int32_t)ctr; } #else /* MLD_ARITH_BACKEND_X86_64_DEFAULT && !MLD_CONFIG_MULTILEVEL_NO_SHARED \ diff --git a/dev/x86_64/src/rej_uniform_eta2_avx2.c b/dev/x86_64/src/rej_uniform_eta2_avx2.c index 70c5c42c7..4283775de 100644 --- a/dev/x86_64/src/rej_uniform_eta2_avx2.c +++ b/dev/x86_64/src/rej_uniform_eta2_avx2.c @@ -35,7 +35,7 @@ * rej_eta_avx and supports multiple values for ETA via preprocessor * conditionals. We move the conditionals to the frontend. */ -unsigned int mld_rej_uniform_eta2_avx2( +int32_t mld_rej_uniform_eta2_avx2( int32_t *MLD_RESTRICT r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA2_BUFLEN]) { @@ -139,7 +139,7 @@ unsigned int mld_rej_uniform_eta2_avx2( } } - return ctr; + return (int32_t)ctr; } #else /* MLD_ARITH_BACKEND_X86_64_DEFAULT && !MLD_CONFIG_MULTILEVEL_NO_SHARED \ diff --git a/dev/x86_64/src/rej_uniform_eta4_avx2.c b/dev/x86_64/src/rej_uniform_eta4_avx2.c index ba00e9125..fa4721db8 100644 --- a/dev/x86_64/src/rej_uniform_eta4_avx2.c +++ b/dev/x86_64/src/rej_uniform_eta4_avx2.c @@ -36,7 +36,7 @@ * conditionals. We move the conditionals to the frontend. 
*/ -unsigned int mld_rej_uniform_eta4_avx2( +int32_t mld_rej_uniform_eta4_avx2( int32_t *MLD_RESTRICT r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA4_BUFLEN]) { @@ -123,7 +123,7 @@ unsigned int mld_rej_uniform_eta4_avx2( } } - return ctr; + return (int32_t)ctr; } #else /* MLD_ARITH_BACKEND_X86_64_DEFAULT && !MLD_CONFIG_MULTILEVEL_NO_SHARED \ diff --git a/mldsa/src/cbmc.h b/mldsa/src/cbmc.h index 72b8538f0..dff3d2303 100644 --- a/mldsa/src/cbmc.h +++ b/mldsa/src/cbmc.h @@ -93,14 +93,14 @@ #define forall(qvar, qvar_lb, qvar_ub, predicate) \ __CPROVER_forall \ { \ - unsigned qvar; \ + signed int qvar; \ ((qvar_lb) <= (qvar) && (qvar) < (qvar_ub)) ==> (predicate) \ } #define exists(qvar, qvar_lb, qvar_ub, predicate) \ __CPROVER_exists \ { \ - unsigned qvar; \ + signed int qvar; \ ((qvar_lb) <= (qvar) && (qvar) < (qvar_ub)) && (predicate) \ } /* clang-format on */ @@ -119,7 +119,7 @@ value_lb, value_ub) \ __CPROVER_forall \ { \ - unsigned qvar; \ + signed int qvar; \ ((qvar_lb) <= (qvar) && (qvar) < (qvar_ub)) ==> \ (((int)(value_lb) <= ((array_var)[(qvar)])) && \ (((array_var)[(qvar)]) < (int)(value_ub))) \ diff --git a/mldsa/src/debug.c b/mldsa/src/debug.c index a55d51e8b..3fbbdb133 100644 --- a/mldsa/src/debug.c +++ b/mldsa/src/debug.c @@ -30,11 +30,11 @@ void mld_debug_check_assert(const char *file, int line, const int val) } void mld_debug_check_bounds(const char *file, int line, const int32_t *ptr, - unsigned len, int64_t lower_bound_exclusive, + int len, int64_t lower_bound_exclusive, int64_t upper_bound_exclusive) { int err = 0; - unsigned i; + int i; for (i = 0; i < len; i++) { int32_t val = ptr[i]; diff --git a/mldsa/src/debug.h b/mldsa/src/debug.h index af187bb9d..b592d6d0f 100644 --- a/mldsa/src/debug.h +++ b/mldsa/src/debug.h @@ -43,7 +43,7 @@ void mld_debug_check_assert(const char *file, int line, const int val); **************************************************/ #define mld_debug_check_bounds MLD_NAMESPACE(mldsa_debug_check_bounds) void 
mld_debug_check_bounds(const char *file, int line, const int32_t *ptr, - unsigned len, int64_t lower_bound_exclusive, + int len, int64_t lower_bound_exclusive, int64_t upper_bound_exclusive); /* Check assertion, calling exit() upon failure diff --git a/mldsa/src/fips202/fips202.c b/mldsa/src/fips202/fips202.c index 72b15ad36..f4e91fd4f 100644 --- a/mldsa/src/fips202/fips202.c +++ b/mldsa/src/fips202/fips202.c @@ -64,45 +64,44 @@ __contract__( * Description: Absorb step of Keccak; incremental. * * Arguments: - uint64_t *s: pointer to Keccak state - * - unsigned int pos: position in current block to be absorbed - * - unsigned int r: rate in bytes (e.g., 168 for SHAKE128) + * - int pos: position in current block to be absorbed + * - int r: rate in bytes (e.g., 168 for SHAKE128) * - const uint8_t *in: pointer to input to be absorbed into s * - size_t inlen: length of input in bytes * * Returns new position pos in current block **************************************************/ -static unsigned int keccak_absorb(uint64_t s[MLD_KECCAK_LANES], - unsigned int pos, unsigned int r, - const uint8_t *in, size_t inlen) +static int keccak_absorb(uint64_t s[MLD_KECCAK_LANES], int pos, int r, + const uint8_t *in, size_t inlen) __contract__( requires(inlen <= MLD_MAX_BUFFER_SIZE) - requires(r < sizeof(uint64_t) * MLD_KECCAK_LANES) - requires(pos <= r) + requires(r >= 0 && r < sizeof(uint64_t) * MLD_KECCAK_LANES) + requires(pos >= 0 && pos <= r) requires(memory_no_alias(s, sizeof(uint64_t) * MLD_KECCAK_LANES)) requires(memory_no_alias(in, inlen)) assigns(memory_slice(s, sizeof(uint64_t) * MLD_KECCAK_LANES)) - ensures(return_value < r)) + ensures(return_value >= 0 && return_value < r)) { - while (inlen >= r - pos) + while (inlen >= (size_t)r - (size_t)pos) __loop__( assigns(pos, in, inlen, memory_slice(s, sizeof(uint64_t) * MLD_KECCAK_LANES)) + invariant(pos >= 0 && pos <= r) invariant(inlen <= loop_entry(inlen)) - invariant(pos <= r) invariant(in == loop_entry(in) + 
(loop_entry(inlen) - inlen))) { mld_keccakf1600_xor_bytes(s, in, pos, r - pos); - inlen -= r - pos; + inlen -= (size_t)r - (size_t)pos; in += r - pos; mld_keccakf1600_permute(s); pos = 0; } - /* Safety: At this point, inlen < r, so the truncation to unsigned is safe. */ - mld_keccakf1600_xor_bytes(s, in, pos, (unsigned)inlen); + /* Safety: At this point, inlen < r, so the cast to int is safe. */ + mld_keccakf1600_xor_bytes(s, in, pos, (int)inlen); - /* Safety: At this point, inlen < r and pos <= r so the truncation to unsigned + /* Safety: At this point, inlen < r and pos <= r so the cast to int * is safe. */ - return (unsigned)(pos + inlen); + return pos + (int)inlen; } /************************************************* @@ -111,15 +110,15 @@ __contract__( * Description: Finalize absorb step. * * Arguments: - uint64_t *s: pointer to Keccak state - * - unsigned int pos: position in current block to be absorbed - * - unsigned int r: rate in bytes (e.g., 168 for SHAKE128) + * - int pos: position in current block to be absorbed + * - int r: rate in bytes (e.g., 168 for SHAKE128) * - uint8_t p: domain separation byte **************************************************/ -static void keccak_finalize(uint64_t s[MLD_KECCAK_LANES], unsigned int pos, - unsigned int r, uint8_t p) +static void keccak_finalize(uint64_t s[MLD_KECCAK_LANES], int pos, int r, + uint8_t p) __contract__( - requires(pos <= r && r < sizeof(uint64_t) * MLD_KECCAK_LANES) - requires((r / 8) >= 1) + requires(pos >= 0 && pos <= r && r < sizeof(uint64_t) * MLD_KECCAK_LANES) + requires(r >= 0 && (r / 8) >= 1) requires(memory_no_alias(s, sizeof(uint64_t) * MLD_KECCAK_LANES)) assigns(memory_slice(s, sizeof(uint64_t) * MLD_KECCAK_LANES)) ) @@ -139,28 +138,28 @@ __contract__( * Arguments: - uint8_t *out: pointer to output data * - size_t outlen: number of bytes to be squeezed (written to out) * - uint64_t *s: pointer to input/output Keccak state - * - unsigned int pos: number of bytes in current block already - 
*squeezed - * - unsigned int r: rate in bytes (e.g., 168 for SHAKE128) + * - int pos: number of bytes in current block already squeezed + * - int r: rate in bytes (e.g., 168 for SHAKE128) * * Returns new position pos in current block **************************************************/ -static unsigned int keccak_squeeze(uint8_t *out, size_t outlen, - uint64_t s[MLD_KECCAK_LANES], - unsigned int pos, unsigned int r) +static int keccak_squeeze(uint8_t *out, size_t outlen, + uint64_t s[MLD_KECCAK_LANES], int pos, int r) __contract__( + requires(pos >= 0 && pos <= r) requires((r == SHAKE128_RATE && pos <= SHAKE128_RATE) || (r == SHAKE256_RATE && pos <= SHAKE256_RATE) || (r == SHA3_512_RATE && pos <= SHA3_512_RATE)) + requires(r >= 0) requires(outlen <= 8 * r /* somewhat arbitrary bound */) requires(memory_no_alias(s, sizeof(uint64_t) * MLD_KECCAK_LANES)) requires(memory_no_alias(out, outlen)) assigns(memory_slice(s, sizeof(uint64_t) * MLD_KECCAK_LANES)) assigns(memory_slice(out, outlen)) - ensures(return_value <= r)) + ensures(return_value >= 0 && return_value <= r)) { - unsigned int i; - size_t out_offset = 0; + int i; + int out_offset = 0; /* Reference: This code is re-factored from the reference implementation * to facilitate proof with CBMC and to improve readability. @@ -168,14 +167,14 @@ __contract__( * Take a mutable copy of outlen to count down the number of bytes * still to squeeze. The initial value of outlen is needed for the CBMC * assigns() clauses. 
*/ - size_t bytes_to_go = outlen; + int bytes_to_go = (int)outlen; while (bytes_to_go > 0) __loop__( assigns(i, bytes_to_go, pos, out_offset, memory_slice(s, sizeof(uint64_t) * MLD_KECCAK_LANES), memory_slice(out, outlen)) - invariant(bytes_to_go <= outlen) - invariant(out_offset == outlen - bytes_to_go) - invariant(pos <= r) + invariant(pos >= 0 && pos <= r) + invariant(bytes_to_go >= 0 && bytes_to_go <= outlen) + invariant(out_offset == (int)outlen - bytes_to_go) ) { if (pos == r) @@ -184,7 +183,7 @@ __contract__( pos = 0; } /* Safety: If bytes_to_go < r - pos, truncation to unsigned is safe. */ - i = bytes_to_go < r - pos ? (unsigned)bytes_to_go : r - pos; + i = (bytes_to_go < r - pos) ? bytes_to_go : r - pos; mld_keccakf1600_extract_bytes(s, out + out_offset, pos, i); bytes_to_go -= i; pos += i; diff --git a/mldsa/src/fips202/fips202.h b/mldsa/src/fips202/fips202.h index 9f0253cb8..c5675790c 100644 --- a/mldsa/src/fips202/fips202.h +++ b/mldsa/src/fips202/fips202.h @@ -22,13 +22,13 @@ typedef struct { uint64_t s[MLD_KECCAK_LANES]; - unsigned int pos; + int pos; } mld_shake128ctx; typedef struct { uint64_t s[MLD_KECCAK_LANES]; - unsigned int pos; + int pos; } mld_shake256ctx; #define mld_shake128_init MLD_NAMESPACE(shake128_init) @@ -63,9 +63,9 @@ __contract__( requires(inlen <= MLD_MAX_BUFFER_SIZE) requires(memory_no_alias(state, sizeof(mld_shake128ctx))) requires(memory_no_alias(in, inlen)) - requires(state->pos <= SHAKE128_RATE) + requires(state->pos >= 0 && state->pos <= SHAKE128_RATE) assigns(memory_slice(state, sizeof(mld_shake128ctx))) - ensures(state->pos <= SHAKE128_RATE) + ensures(state->pos >= 0 && state->pos <= SHAKE128_RATE) ); #define mld_shake128_finalize MLD_NAMESPACE(shake128_finalize) @@ -79,9 +79,9 @@ __contract__( void mld_shake128_finalize(mld_shake128ctx *state) __contract__( requires(memory_no_alias(state, sizeof(mld_shake128ctx))) - requires(state->pos <= SHAKE128_RATE) + requires(state->pos >= 0 && state->pos <= SHAKE128_RATE) 
assigns(memory_slice(state, sizeof(mld_shake128ctx))) - ensures(state->pos <= SHAKE128_RATE) + ensures(state->pos >= 0 && state->pos <= SHAKE128_RATE) ); #define mld_shake128_squeeze MLD_NAMESPACE(shake128_squeeze) @@ -101,10 +101,10 @@ __contract__( requires(outlen <= 8 * SHAKE128_RATE /* somewhat arbitrary bound */) requires(memory_no_alias(state, sizeof(mld_shake128ctx))) requires(memory_no_alias(out, outlen)) - requires(state->pos <= SHAKE128_RATE) + requires(state->pos >= 0 && state->pos <= SHAKE128_RATE) assigns(memory_slice(state, sizeof(mld_shake128ctx))) assigns(memory_slice(out, outlen)) - ensures(state->pos <= SHAKE128_RATE) + ensures(state->pos >= 0 && state->pos <= SHAKE128_RATE) ); #define mld_shake128_release MLD_NAMESPACE(shake128_release) @@ -153,9 +153,9 @@ __contract__( requires(inlen <= MLD_MAX_BUFFER_SIZE) requires(memory_no_alias(state, sizeof(mld_shake256ctx))) requires(memory_no_alias(in, inlen)) - requires(state->pos <= SHAKE256_RATE) + requires(state->pos >= 0 && state->pos <= SHAKE256_RATE) assigns(memory_slice(state, sizeof(mld_shake256ctx))) - ensures(state->pos <= SHAKE256_RATE) + ensures(state->pos >= 0 && state->pos <= SHAKE256_RATE) ); #define mld_shake256_finalize MLD_NAMESPACE(shake256_finalize) @@ -169,9 +169,9 @@ __contract__( void mld_shake256_finalize(mld_shake256ctx *state) __contract__( requires(memory_no_alias(state, sizeof(mld_shake256ctx))) - requires(state->pos <= SHAKE256_RATE) + requires(state->pos >= 0 && state->pos <= SHAKE256_RATE) assigns(memory_slice(state, sizeof(mld_shake256ctx))) - ensures(state->pos <= SHAKE256_RATE) + ensures(state->pos >= 0 && state->pos <= SHAKE256_RATE) ); #define mld_shake256_squeeze MLD_NAMESPACE(shake256_squeeze) @@ -191,10 +191,10 @@ __contract__( requires(outlen <= 8 * SHAKE256_RATE /* somewhat arbitrary bound */) requires(memory_no_alias(state, sizeof(mld_shake256ctx))) requires(memory_no_alias(out, outlen)) - requires(state->pos <= SHAKE256_RATE) + requires(state->pos >= 0 && 
state->pos <= SHAKE256_RATE) assigns(memory_slice(state, sizeof(mld_shake256ctx))) assigns(memory_slice(out, outlen)) - ensures(state->pos <= SHAKE256_RATE) + ensures(state->pos >= 0 && state->pos <= SHAKE256_RATE) ); #define mld_shake256_release MLD_NAMESPACE(shake256_release) diff --git a/mldsa/src/fips202/fips202x4.c b/mldsa/src/fips202/fips202x4.c index 0fb5cd57c..ea061130c 100644 --- a/mldsa/src/fips202/fips202x4.c +++ b/mldsa/src/fips202/fips202x4.c @@ -22,21 +22,21 @@ #include "fips202x4.h" #include "keccakf1600.h" -static void mld_keccak_absorb_once_x4(uint64_t *s, uint32_t r, - const uint8_t *in0, const uint8_t *in1, - const uint8_t *in2, const uint8_t *in3, - size_t inlen, uint8_t p) +static void mld_keccak_absorb_once_x4(uint64_t *s, int r, const uint8_t *in0, + const uint8_t *in1, const uint8_t *in2, + const uint8_t *in3, size_t inlen, + uint8_t p) __contract__( requires(inlen <= MLD_MAX_BUFFER_SIZE) requires(memory_no_alias(s, sizeof(uint64_t) * MLD_KECCAK_LANES * MLD_KECCAK_WAY)) - requires(r <= sizeof(uint64_t) * MLD_KECCAK_LANES) + requires(r >= 0 && r <= sizeof(uint64_t) * MLD_KECCAK_LANES) requires(memory_no_alias(in0, inlen)) requires(memory_no_alias(in1, inlen)) requires(memory_no_alias(in2, inlen)) requires(memory_no_alias(in3, inlen)) assigns(memory_slice(s, sizeof(uint64_t) * MLD_KECCAK_LANES * MLD_KECCAK_WAY))) { - while (inlen >= r) + while (inlen >= (size_t)r) __loop__( assigns(inlen, in0, in1, in2, in3, memory_slice(s, sizeof(uint64_t) * MLD_KECCAK_LANES * MLD_KECCAK_WAY)) invariant(inlen <= loop_entry(inlen)) @@ -52,24 +52,24 @@ __contract__( in1 += r; in2 += r; in3 += r; - inlen -= r; + inlen -= (size_t)r; } /* Safety: At this point, inlen < r, so the truncations to unsigned are safe * below. 
*/ if (inlen > 0) { - mld_keccakf1600x4_xor_bytes(s, in0, in1, in2, in3, 0, (unsigned)inlen); + mld_keccakf1600x4_xor_bytes(s, in0, in1, in2, in3, 0, (int)inlen); } - if (inlen == r - 1) + if (inlen == (size_t)r - 1) { p |= 128; - mld_keccakf1600x4_xor_bytes(s, &p, &p, &p, &p, (unsigned)inlen, 1); + mld_keccakf1600x4_xor_bytes(s, &p, &p, &p, &p, (int)inlen, 1); } else { - mld_keccakf1600x4_xor_bytes(s, &p, &p, &p, &p, (unsigned)inlen, 1); + mld_keccakf1600x4_xor_bytes(s, &p, &p, &p, &p, (int)inlen, 1); p = 128; mld_keccakf1600x4_xor_bytes(s, &p, &p, &p, &p, r - 1, 1); } @@ -77,9 +77,9 @@ __contract__( static void mld_keccak_squeezeblocks_x4(uint8_t *out0, uint8_t *out1, uint8_t *out2, uint8_t *out3, - size_t nblocks, uint64_t *s, uint32_t r) + size_t nblocks, uint64_t *s, int r) __contract__( - requires(r <= sizeof(uint64_t) * MLD_KECCAK_LANES) + requires(r >= 0 && r <= sizeof(uint64_t) * MLD_KECCAK_LANES) requires(nblocks <= 8 /* somewhat arbitrary bound */) requires(memory_no_alias(s, sizeof(uint64_t) * MLD_KECCAK_LANES * MLD_KECCAK_WAY)) requires(memory_no_alias(out0, nblocks * r)) @@ -107,7 +107,7 @@ __contract__( out3 == loop_entry(out3) + r * (loop_entry(nblocks) - nblocks))) { mld_keccakf1600x4_permute(s); - mld_keccakf1600x4_extract_bytes(s, out0, out1, out2, out3, 0, r); + mld_keccakf1600x4_extract_bytes(s, out0, out1, out2, out3, 0, (int)r); out0 += r; out1 += r; diff --git a/mldsa/src/fips202/keccakf1600.c b/mldsa/src/fips202/keccakf1600.c index e59120c07..3aec9b33a 100644 --- a/mldsa/src/fips202/keccakf1600.c +++ b/mldsa/src/fips202/keccakf1600.c @@ -37,20 +37,20 @@ #define MLD_KECCAK_ROL(a, offset) ((a << offset) ^ (a >> (64 - offset))) void mld_keccakf1600_extract_bytes(uint64_t *state, unsigned char *data, - unsigned offset, unsigned length) + int offset, int length) { - unsigned i; + int i; #if defined(MLD_SYS_LITTLE_ENDIAN) uint8_t *state_ptr = (uint8_t *)state + offset; for (i = 0; i < length; i++) - __loop__(invariant(i <= length)) + 
__loop__(invariant(i >= 0 && i <= length)) { data[i] = state_ptr[i]; } #else /* MLD_SYS_LITTLE_ENDIAN */ /* Portable version */ for (i = 0; i < length; i++) - __loop__(invariant(i <= length)) + __loop__(invariant(i >= 0 && i <= length)) { data[i] = (state[(offset + i) >> 3] >> (8 * ((offset + i) & 0x07))) & 0xFF; } @@ -58,20 +58,20 @@ void mld_keccakf1600_extract_bytes(uint64_t *state, unsigned char *data, } void mld_keccakf1600_xor_bytes(uint64_t *state, const unsigned char *data, - unsigned offset, unsigned length) + int offset, int length) { - unsigned i; + int i; #if defined(MLD_SYS_LITTLE_ENDIAN) uint8_t *state_ptr = (uint8_t *)state + offset; for (i = 0; i < length; i++) - __loop__(invariant(i <= length)) + __loop__(invariant(i >= 0 && i <= length)) { state_ptr[i] ^= data[i]; } #else /* MLD_SYS_LITTLE_ENDIAN */ /* Portable version */ for (i = 0; i < length; i++) - __loop__(invariant(i <= length)) + __loop__(invariant(i >= 0 && i <= length)) { state[(offset + i) >> 3] ^= (uint64_t)data[i] << (8 * ((offset + i) & 0x07)); @@ -81,8 +81,8 @@ void mld_keccakf1600_xor_bytes(uint64_t *state, const unsigned char *data, void mld_keccakf1600x4_extract_bytes(uint64_t *state, unsigned char *data0, unsigned char *data1, unsigned char *data2, - unsigned char *data3, unsigned offset, - unsigned length) + unsigned char *data3, int offset, + int length) { mld_keccakf1600_extract_bytes(state + MLD_KECCAK_LANES * 0, data0, offset, length); @@ -97,8 +97,8 @@ void mld_keccakf1600x4_extract_bytes(uint64_t *state, unsigned char *data0, void mld_keccakf1600x4_xor_bytes(uint64_t *state, const unsigned char *data0, const unsigned char *data1, const unsigned char *data2, - const unsigned char *data3, unsigned offset, - unsigned length) + const unsigned char *data3, int offset, + int length) { mld_keccakf1600_xor_bytes(state + MLD_KECCAK_LANES * 0, data0, offset, length); @@ -140,7 +140,7 @@ static const uint64_t mld_KeccakF_RoundConstants[MLD_KECCAK_NROUNDS] = { static void 
mld_keccakf1600_permute_c(uint64_t *state) { - unsigned round; + int round; uint64_t Aba, Abe, Abi, Abo, Abu; uint64_t Aga, Age, Agi, Ago, Agu; @@ -183,7 +183,7 @@ static void mld_keccakf1600_permute_c(uint64_t *state) Asu = state[24]; for (round = 0; round < MLD_KECCAK_NROUNDS; round += 2) - __loop__(invariant(round <= MLD_KECCAK_NROUNDS && round % 2 == 0)) + __loop__(invariant(round >= 0 && round <= MLD_KECCAK_NROUNDS && round % 2 == 0)) { /* prepareTheta */ BCa = Aba ^ Aga ^ Aka ^ Ama ^ Asa; diff --git a/mldsa/src/fips202/keccakf1600.h b/mldsa/src/fips202/keccakf1600.h index e56adc156..63e8ed562 100644 --- a/mldsa/src/fips202/keccakf1600.h +++ b/mldsa/src/fips202/keccakf1600.h @@ -22,7 +22,7 @@ #define mld_keccakf1600_extract_bytes MLD_NAMESPACE(keccakf1600_extract_bytes) void mld_keccakf1600_extract_bytes(uint64_t *state, unsigned char *data, - unsigned offset, unsigned length) + int offset, int length) __contract__( requires(0 <= offset && offset <= MLD_KECCAK_LANES * sizeof(uint64_t) && 0 <= length && length <= MLD_KECCAK_LANES * sizeof(uint64_t) - offset) @@ -33,7 +33,7 @@ __contract__( #define mld_keccakf1600_xor_bytes MLD_NAMESPACE(keccakf1600_xor_bytes) void mld_keccakf1600_xor_bytes(uint64_t *state, const unsigned char *data, - unsigned offset, unsigned length) + int offset, int length) __contract__( requires(0 <= offset && offset <= MLD_KECCAK_LANES * sizeof(uint64_t) && 0 <= length && length <= MLD_KECCAK_LANES * sizeof(uint64_t) - offset) @@ -46,8 +46,8 @@ __contract__( MLD_NAMESPACE(keccakf1600x4_extract_bytes) void mld_keccakf1600x4_extract_bytes(uint64_t *state, unsigned char *data0, unsigned char *data1, unsigned char *data2, - unsigned char *data3, unsigned offset, - unsigned length) + unsigned char *data3, int offset, + int length) __contract__( requires(0 <= offset && offset <= MLD_KECCAK_LANES * sizeof(uint64_t) && 0 <= length && length <= MLD_KECCAK_LANES * sizeof(uint64_t) - offset) @@ -66,8 +66,8 @@ __contract__( void 
mld_keccakf1600x4_xor_bytes(uint64_t *state, const unsigned char *data0, const unsigned char *data1, const unsigned char *data2, - const unsigned char *data3, unsigned offset, - unsigned length) + const unsigned char *data3, int offset, + int length) __contract__( requires(0 <= offset && offset <= MLD_KECCAK_LANES * sizeof(uint64_t) && 0 <= length && length <= MLD_KECCAK_LANES * sizeof(uint64_t) - offset) diff --git a/mldsa/src/native/aarch64/meta.h b/mldsa/src/native/aarch64/meta.h index 9f4a01835..72f746459 100644 --- a/mldsa/src/native/aarch64/meta.h +++ b/mldsa/src/native/aarch64/meta.h @@ -32,6 +32,7 @@ #if !defined(__ASSEMBLER__) +#include #include "../api.h" #include "src/arith_native_aarch64.h" @@ -49,9 +50,8 @@ static MLD_INLINE int mld_intt_native(int32_t data[MLDSA_N]) return MLD_NATIVE_FUNC_SUCCESS; } -static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, - const uint8_t *buf, - unsigned buflen) +static MLD_INLINE int mld_rej_uniform_native(int32_t *r, int len, + const uint8_t *buf, int buflen) { if (len != MLDSA_N || buflen % 24 != 0) /* NEON support is mandatory for AArch64 */ @@ -59,15 +59,15 @@ static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, return MLD_NATIVE_FUNC_FALLBACK; } - /* Safety: outlen is at most MLDSA_N, hence, this cast is safe. */ + /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. 
*/ return (int)mld_rej_uniform_asm(r, buf, buflen, mld_rej_uniform_table); } -static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AArch64 implementation assumes specific buffer lengths */ if (len != MLDSA_N || buflen != MLD_AARCH64_REJ_UNIFORM_ETA2_BUFLEN) { @@ -82,16 +82,16 @@ static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta2_asm(r, buf, buflen, mld_rej_uniform_eta_table); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. */ return (int)outlen; } -static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AArch64 implementation assumes specific buffer lengths */ if (len != MLDSA_N || buflen != MLD_AARCH64_REJ_UNIFORM_ETA4_BUFLEN) { @@ -106,7 +106,7 @@ static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta4_asm(r, buf, buflen, mld_rej_uniform_eta_table); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. 
*/ return (int)outlen; } diff --git a/mldsa/src/native/aarch64/src/arith_native_aarch64.h b/mldsa/src/native/aarch64/src/arith_native_aarch64.h index 22f193585..b3bd326ef 100644 --- a/mldsa/src/native/aarch64/src/arith_native_aarch64.h +++ b/mldsa/src/native/aarch64/src/arith_native_aarch64.h @@ -56,16 +56,16 @@ void mld_ntt_asm(int32_t *, const int32_t *, const int32_t *); void mld_intt_asm(int32_t *, const int32_t *, const int32_t *); #define mld_rej_uniform_asm MLD_NAMESPACE(rej_uniform_asm) -uint64_t mld_rej_uniform_asm(int32_t *r, const uint8_t *buf, unsigned buflen, +uint64_t mld_rej_uniform_asm(int32_t *r, const uint8_t *buf, int32_t buflen, const uint8_t *table); #define mld_rej_uniform_eta2_asm MLD_NAMESPACE(rej_uniform_eta2_asm) -unsigned mld_rej_uniform_eta2_asm(int32_t *r, const uint8_t *buf, - unsigned buflen, const uint8_t *table); +int32_t mld_rej_uniform_eta2_asm(int32_t *r, const uint8_t *buf, int32_t buflen, + const uint8_t *table); #define mld_rej_uniform_eta4_asm MLD_NAMESPACE(rej_uniform_eta4_asm) -unsigned mld_rej_uniform_eta4_asm(int32_t *r, const uint8_t *buf, - unsigned buflen, const uint8_t *table); +int32_t mld_rej_uniform_eta4_asm(int32_t *r, const uint8_t *buf, int32_t buflen, + const uint8_t *table); #define mld_poly_decompose_32_asm MLD_NAMESPACE(poly_decompose_32_asm) void mld_poly_decompose_32_asm(int32_t *a1, int32_t *a0, const int32_t *a); diff --git a/mldsa/src/native/api.h b/mldsa/src/native/api.h index 333bb0c7b..458b3dbc1 100644 --- a/mldsa/src/native/api.h +++ b/mldsa/src/native/api.h @@ -122,19 +122,18 @@ static MLD_INLINE int mld_intt_native(int32_t p[MLDSA_N]); * uniform random integers in [0, MLDSA_Q-1] * * Arguments: - int32_t *r: pointer to output buffer - * - unsigned len: requested number of 32-bit integers + * - int len: requested number of 32-bit integers * (uniform mod q). * - const uint8_t *buf: pointer to input buffer * (assumed to be uniform random bytes) - * - unsigned buflen: length of input buffer in bytes. 
+ * - int buflen: length of input buffer in bytes. * * Return -1 if the native implementation does not support the input * lengths. Otherwise, returns non-negative number of sampled 32-bit integers * (at most len). **************************************************/ -static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, - const uint8_t *buf, - unsigned buflen); +static MLD_INLINE int mld_rej_uniform_native(int32_t *r, int len, + const uint8_t *buf, int buflen); #endif /* MLD_USE_NATIVE_REJ_UNIFORM */ #if defined(MLD_USE_NATIVE_REJ_UNIFORM_ETA2) @@ -145,19 +144,19 @@ static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, * uniform random integers in [-2,+2]. * * Arguments: - int32_t *r: pointer to output buffer - * - unsigned len: requested number of 32-bit integers + * - int len: requested number of 32-bit integers * (uniform in [-2, +2]). * - const uint8_t *buf: pointer to input buffer * (assumed to be uniform random bytes) - * - unsigned buflen: length of input buffer in bytes. + * - int buflen: length of input buffer in bytes. * * Return -1 if the native implementation does not support the input *lengths. Otherwise, returns non-negative number of sampled 32-bit integers *(at most len). **************************************************/ -static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen); + int buflen); #endif /* MLD_USE_NATIVE_REJ_UNIFORM_ETA2 */ #if defined(MLD_USE_NATIVE_REJ_UNIFORM_ETA4) @@ -168,19 +167,19 @@ static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, * uniform random integers in [-4,+4]. * * Arguments: - int32_t *r: pointer to output buffer - * - unsigned len: requested number of 32-bit integers + * - int len: requested number of 32-bit integers * (uniform in [-4, +4]). 
* - const uint8_t *buf: pointer to input buffer * (assumed to be uniform random bytes) - * - unsigned buflen: length of input buffer in bytes. + * - int buflen: length of input buffer in bytes. * * Return -1 if the native implementation does not support the input *lengths. Otherwise, returns non-negative number of sampled 32-bit integers *(at most len). **************************************************/ -static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen); + int buflen); #endif /* MLD_USE_NATIVE_REJ_UNIFORM_ETA4 */ #if defined(MLD_USE_NATIVE_POLY_DECOMPOSE_32) diff --git a/mldsa/src/native/x86_64/meta.h b/mldsa/src/native/x86_64/meta.h index 7978443f4..553b311bc 100644 --- a/mldsa/src/native/x86_64/meta.h +++ b/mldsa/src/native/x86_64/meta.h @@ -31,6 +31,7 @@ #define MLD_USE_NATIVE_POLYVECL_POINTWISE_ACC_MONTGOMERY_L7 #if !defined(__ASSEMBLER__) +#include #include #include "../../common.h" #include "../api.h" @@ -64,9 +65,8 @@ static MLD_INLINE int mld_intt_native(int32_t data[MLDSA_N]) return MLD_NATIVE_FUNC_SUCCESS; } -static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, - const uint8_t *buf, - unsigned buflen) +static MLD_INLINE int mld_rej_uniform_native(int32_t *r, int len, + const uint8_t *buf, int buflen) { /* AVX2 implementation assumes specific buffer lengths */ if (!mld_sys_check_capability(MLD_SYS_CAP_AVX2) || len != MLDSA_N || @@ -79,11 +79,11 @@ static MLD_INLINE int mld_rej_uniform_native(int32_t *r, unsigned len, return (int)mld_rej_uniform_avx2(r, buf); } -static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AVX2 implementation assumes specific buffer lengths */ if 
(!mld_sys_check_capability(MLD_SYS_CAP_AVX2) || len != MLDSA_N || buflen != MLD_AVX2_REJ_UNIFORM_ETA2_BUFLEN) @@ -100,16 +100,16 @@ static MLD_INLINE int mld_rej_uniform_eta2_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta2_avx2(r, buf); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. */ return (int)outlen; } -static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, +static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, int len, const uint8_t *buf, - unsigned buflen) + int buflen) { - unsigned int outlen; + int32_t outlen; /* AVX2 implementation assumes specific buffer lengths */ if (!mld_sys_check_capability(MLD_SYS_CAP_AVX2) || len != MLDSA_N || buflen != MLD_AVX2_REJ_UNIFORM_ETA4_BUFLEN) @@ -126,7 +126,7 @@ static MLD_INLINE int mld_rej_uniform_eta4_native(int32_t *r, unsigned len, */ MLD_CT_TESTING_DECLASSIFY(buf, buflen); outlen = mld_rej_uniform_eta4_avx2(r, buf); - MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * outlen); + MLD_CT_TESTING_SECRET(r, sizeof(int32_t) * (size_t)outlen); /* Safety: outlen is at most MLDSA_N and, hence, this cast is safe. 
*/ return (int)outlen; } diff --git a/mldsa/src/native/x86_64/src/arith_native_x86_64.h b/mldsa/src/native/x86_64/src/arith_native_x86_64.h index fd59a21e0..1d15eb6f5 100644 --- a/mldsa/src/native/x86_64/src/arith_native_x86_64.h +++ b/mldsa/src/native/x86_64/src/arith_native_x86_64.h @@ -43,15 +43,15 @@ void mld_invntt_avx2(__m256i *r, const __m256i *mld_qdata); void mld_nttunpack_avx2(__m256i *r); #define mld_rej_uniform_avx2 MLD_NAMESPACE(mld_rej_uniform_avx2) -unsigned mld_rej_uniform_avx2(int32_t *r, - const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]); +int32_t mld_rej_uniform_avx2(int32_t *r, + const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]); #define mld_rej_uniform_eta2_avx2 MLD_NAMESPACE(mld_rej_uniform_eta2_avx2) -unsigned mld_rej_uniform_eta2_avx2( +int32_t mld_rej_uniform_eta2_avx2( int32_t *r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA2_BUFLEN]); #define mld_rej_uniform_eta4_avx2 MLD_NAMESPACE(mld_rej_uniform_eta4_avx2) -unsigned mld_rej_uniform_eta4_avx2( +int32_t mld_rej_uniform_eta4_avx2( int32_t *r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA4_BUFLEN]); #define mld_poly_decompose_32_avx2 MLD_NAMESPACE(mld_poly_decompose_32_avx2) diff --git a/mldsa/src/native/x86_64/src/rej_uniform_avx2.c b/mldsa/src/native/x86_64/src/rej_uniform_avx2.c index 07a5df970..6c7243bd3 100644 --- a/mldsa/src/native/x86_64/src/rej_uniform_avx2.c +++ b/mldsa/src/native/x86_64/src/rej_uniform_avx2.c @@ -36,8 +36,8 @@ * frontend to perform the unintuitive padding. 
*/ -unsigned int mld_rej_uniform_avx2( - int32_t *MLD_RESTRICT r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]) +int32_t mld_rej_uniform_avx2(int32_t *MLD_RESTRICT r, + const uint8_t buf[MLD_AVX2_REJ_UNIFORM_BUFLEN]) { unsigned int ctr, pos; uint32_t good; @@ -116,7 +116,7 @@ unsigned int mld_rej_uniform_avx2( } } - return ctr; + return (int32_t)ctr; } #else /* MLD_ARITH_BACKEND_X86_64_DEFAULT && !MLD_CONFIG_MULTILEVEL_NO_SHARED \ diff --git a/mldsa/src/native/x86_64/src/rej_uniform_eta2_avx2.c b/mldsa/src/native/x86_64/src/rej_uniform_eta2_avx2.c index 70c5c42c7..4283775de 100644 --- a/mldsa/src/native/x86_64/src/rej_uniform_eta2_avx2.c +++ b/mldsa/src/native/x86_64/src/rej_uniform_eta2_avx2.c @@ -35,7 +35,7 @@ * rej_eta_avx and supports multiple values for ETA via preprocessor * conditionals. We move the conditionals to the frontend. */ -unsigned int mld_rej_uniform_eta2_avx2( +int32_t mld_rej_uniform_eta2_avx2( int32_t *MLD_RESTRICT r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA2_BUFLEN]) { @@ -139,7 +139,7 @@ unsigned int mld_rej_uniform_eta2_avx2( } } - return ctr; + return (int32_t)ctr; } #else /* MLD_ARITH_BACKEND_X86_64_DEFAULT && !MLD_CONFIG_MULTILEVEL_NO_SHARED \ diff --git a/mldsa/src/native/x86_64/src/rej_uniform_eta4_avx2.c b/mldsa/src/native/x86_64/src/rej_uniform_eta4_avx2.c index ba00e9125..fa4721db8 100644 --- a/mldsa/src/native/x86_64/src/rej_uniform_eta4_avx2.c +++ b/mldsa/src/native/x86_64/src/rej_uniform_eta4_avx2.c @@ -36,7 +36,7 @@ * conditionals. We move the conditionals to the frontend. 
*/ -unsigned int mld_rej_uniform_eta4_avx2( +int32_t mld_rej_uniform_eta4_avx2( int32_t *MLD_RESTRICT r, const uint8_t buf[MLD_AVX2_REJ_UNIFORM_ETA4_BUFLEN]) { @@ -123,7 +123,7 @@ unsigned int mld_rej_uniform_eta4_avx2( } } - return ctr; + return (int32_t)ctr; } #else /* MLD_ARITH_BACKEND_X86_64_DEFAULT && !MLD_CONFIG_MULTILEVEL_NO_SHARED \ diff --git a/mldsa/src/ntt.c b/mldsa/src/ntt.c index a916a640a..e4b0d24b4 100644 --- a/mldsa/src/ntt.c +++ b/mldsa/src/ntt.c @@ -71,10 +71,10 @@ __contract__( /* Reference: Embedded in `ntt()` in the reference implementation @[REF]. */ static void mld_ntt_butterfly_block(int32_t r[MLDSA_N], const int32_t zeta, - const unsigned start, const unsigned len, - const unsigned bound) + const int start, const int len, + const int bound) __contract__( - requires(start < MLDSA_N) + requires(start >= 0 && start < MLDSA_N) requires(1 <= len && len <= MLDSA_N / 2 && start + 2 * len <= MLDSA_N) requires(0 <= bound && bound < INT32_MAX - MLDSA_Q) requires(-MLDSA_Q_HALF < zeta && zeta < MLDSA_Q_HALF) @@ -86,7 +86,7 @@ __contract__( ensures(array_abs_bound(r, start + 2 * len, MLDSA_N, bound))) { /* `bound` is a ghost variable only needed in the CBMC specification */ - unsigned j; + int j; ((void)bound); for (j = start; j < start + len; j++) __loop__( @@ -117,7 +117,7 @@ __contract__( */ /* Reference: Embedded in `ntt()` in the reference implementation @[REF]. */ -static void mld_ntt_layer(int32_t r[MLDSA_N], const unsigned layer) +static void mld_ntt_layer(int32_t r[MLDSA_N], int layer) __contract__( requires(memory_no_alias(r, sizeof(int32_t) * MLDSA_N)) requires(1 <= layer && layer <= 8) @@ -125,14 +125,14 @@ __contract__( assigns(memory_slice(r, sizeof(int32_t) * MLDSA_N)) ensures(array_abs_bound(r, 0, MLDSA_N, (layer + 1) * MLDSA_Q))) { - unsigned start, k, len; + int start, k, len; /* Twiddle factors for layer n are at indices 2^(n-1)..2^n-1. 
*/ - k = 1u << (layer - 1); - len = (unsigned)MLDSA_N >> layer; + k = 1 << (layer - 1); + len = MLDSA_N >> layer; for (start = 0; start < MLDSA_N; start += 2 * len) __loop__( - invariant(start < MLDSA_N + 2 * len) - invariant(k <= MLDSA_N) + invariant(start >= 0 && start < MLDSA_N + 2 * len) + invariant(k >= 1 && k <= MLDSA_N) invariant(2 * len * k == start + MLDSA_N) invariant(array_abs_bound(r, 0, start, layer * MLDSA_Q + MLDSA_Q)) invariant(array_abs_bound(r, start, MLDSA_N, layer * MLDSA_Q))) @@ -145,7 +145,7 @@ __contract__( MLD_INTERNAL_API void mld_ntt(int32_t a[MLDSA_N]) { - unsigned int layer; + int layer; for (layer = 1; layer < 9; layer++) __loop__( @@ -187,7 +187,7 @@ __contract__( /* Reference: Embedded into `invntt_tomont()` in the reference implementation * @[REF] */ -static void mld_invntt_layer(int32_t r[MLDSA_N], unsigned layer) +static void mld_invntt_layer(int32_t r[MLDSA_N], int layer) __contract__( requires(memory_no_alias(r, sizeof(int32_t) * MLDSA_N)) requires(1 <= layer && layer <= 8) @@ -195,22 +195,25 @@ __contract__( assigns(memory_slice(r, sizeof(int32_t) * MLDSA_N)) ensures(array_abs_bound(r, 0, MLDSA_N, (MLDSA_N >> (layer - 1)) * MLDSA_Q))) { - unsigned start, k, len; - len = (unsigned)MLDSA_N >> layer; - k = (1u << layer) - 1; + int start, k, len; + len = MLDSA_N >> layer; + k = (1 << layer) - 1; for (start = 0; start < MLDSA_N; start += 2 * len) __loop__( - invariant(start <= MLDSA_N && k <= 255) + invariant(start >= 0 && start <= MLDSA_N) + invariant(k >= 0 && k <= 255) + invariant(len >= 1 && len <= 128) invariant(2 * len * k + start == 2 * MLDSA_N - 2 * len) invariant(array_abs_bound(r, 0, start, (MLDSA_N >> (layer - 1)) * MLDSA_Q)) invariant(array_abs_bound(r, start, MLDSA_N, (MLDSA_N >> layer) * MLDSA_Q))) { - unsigned j; + int j; int32_t zeta = -mld_zetas[k--]; for (j = start; j < start + len; j++) __loop__( invariant(start <= j && j <= start + len) + invariant(k >= 0 && len >= 1) invariant(array_abs_bound(r, 0, start, 
(MLDSA_N >> (layer - 1)) * MLDSA_Q)) invariant(array_abs_bound(r, start, j, (MLDSA_N >> (layer - 1)) * MLDSA_Q)) invariant(array_abs_bound(r, j, start + len, (MLDSA_N >> layer) * MLDSA_Q)) @@ -227,11 +230,11 @@ __contract__( MLD_INTERNAL_API void mld_invntt_tomont(int32_t a[MLDSA_N]) { - unsigned int layer, j; + int layer, j; for (layer = 8; layer >= 1; layer--) __loop__( - invariant(layer <= 8) + invariant(layer >= 0 && layer <= 8) /* Absolute bounds increase from 1Q before layer 8 */ /* up to 256Q after layer 1 */ invariant(array_abs_bound(a, 0, MLDSA_N, (MLDSA_N >> layer) * MLDSA_Q))) @@ -247,7 +250,7 @@ void mld_invntt_tomont(int32_t a[MLDSA_N]) * value.*/ for (j = 0; j < MLDSA_N; ++j) __loop__( - invariant(j <= MLDSA_N) + invariant(j >= 0 && j <= MLDSA_N) invariant(array_abs_bound(a, 0, j, MLD_INTT_BOUND)) invariant(array_abs_bound(a, j, MLDSA_N, MLDSA_N * MLDSA_Q)) ) diff --git a/mldsa/src/packing.c b/mldsa/src/packing.c index a2bd68555..c08ffe9d7 100644 --- a/mldsa/src/packing.c +++ b/mldsa/src/packing.c @@ -20,13 +20,13 @@ MLD_INTERNAL_API void mld_pack_pk(uint8_t pk[CRYPTO_PUBLICKEYBYTES], const uint8_t rho[MLDSA_SEEDBYTES], const mld_polyveck *t1) { - unsigned int i; + int i; mld_memcpy(pk, rho, MLDSA_SEEDBYTES); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(pk, CRYPTO_PUBLICKEYBYTES)) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) ) { mld_polyt1_pack(pk + MLDSA_SEEDBYTES + i * MLDSA_POLYT1_PACKEDBYTES, @@ -38,7 +38,7 @@ MLD_INTERNAL_API void mld_unpack_pk(uint8_t rho[MLDSA_SEEDBYTES], mld_polyveck *t1, const uint8_t pk[CRYPTO_PUBLICKEYBYTES]) { - unsigned int i; + int i; mld_memcpy(rho, pk, MLDSA_SEEDBYTES); pk += MLDSA_SEEDBYTES; @@ -101,9 +101,9 @@ void mld_unpack_sk(uint8_t rho[MLDSA_SEEDBYTES], uint8_t tr[MLDSA_TRBYTES], MLD_INTERNAL_API void mld_pack_sig(uint8_t sig[CRYPTO_BYTES], const uint8_t c[MLDSA_CTILDEBYTES], const mld_polyvecl *z, const mld_polyveck *h, - const unsigned int number_of_hints) + const int 
number_of_hints) { - unsigned int i, j, k; + int i, j, k; mld_memcpy(sig, c, MLDSA_CTILDEBYTES); sig += MLDSA_CTILDEBYTES; @@ -133,8 +133,8 @@ void mld_pack_sig(uint8_t sig[CRYPTO_BYTES], const uint8_t c[MLDSA_CTILDEBYTES], for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, j, k, memory_slice(sig, MLDSA_POLYVECH_PACKEDBYTES)) - invariant(i <= MLDSA_K) - invariant(k <= number_of_hints) + invariant(i >= 0 && i <= MLDSA_K) + invariant(k >= 0 && k <= number_of_hints) invariant(number_of_hints <= MLDSA_OMEGA) ) { @@ -143,10 +143,10 @@ void mld_pack_sig(uint8_t sig[CRYPTO_BYTES], const uint8_t c[MLDSA_CTILDEBYTES], for (j = 0; j < MLDSA_N; ++j) __loop__( assigns(j, k, memory_slice(sig, MLDSA_POLYVECH_PACKEDBYTES)) - invariant(i <= MLDSA_K) - invariant(j <= MLDSA_N) - invariant(k <= number_of_hints) - invariant(number_of_hints <= MLDSA_OMEGA) + invariant(i >= 0 && i <= MLDSA_K) + invariant(j >= 0 && j <= MLDSA_N) + invariant(k >= 0 && k <= number_of_hints) + invariant(number_of_hints >= 0 && number_of_hints <= MLDSA_OMEGA) ) { /* The reference implementation implicitly relies on the total */ @@ -192,8 +192,8 @@ __contract__( ensures(return_value >= 0 && return_value <= 1) ) { - unsigned int i, j; - unsigned int old_hint_count; + int i; + unsigned int j, old_hint_count; /* Set all coefficients of all polynomials to 0. 
*/ /* Only those that are actually non-zero hints will */ @@ -203,7 +203,7 @@ __contract__( old_hint_count = 0; for (i = 0; i < MLDSA_K; ++i) __loop__( - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) /* Maintain the post-condition */ invariant(forall(k1, 0, MLDSA_K, array_bound(h->vec[k1].coeffs, 0, MLDSA_N, 0, 2))) ) diff --git a/mldsa/src/packing.h b/mldsa/src/packing.h index d03706137..faa4bf0fd 100644 --- a/mldsa/src/packing.h +++ b/mldsa/src/packing.h @@ -80,8 +80,7 @@ __contract__( * MLDSA_SEEDBYTES * - const mld_polyvecl *z: pointer to vector z * - const mld_polyveck *h: pointer to hint vector h - * - const unsigned int number_of_hints: total - * hints in *h + * - const int number_of_hints: total hints in *h * * Note that the number_of_hints argument is not present * in the reference implementation. It is added here to ease @@ -90,7 +89,7 @@ __contract__( MLD_INTERNAL_API void mld_pack_sig(uint8_t sig[CRYPTO_BYTES], const uint8_t c[MLDSA_CTILDEBYTES], const mld_polyvecl *z, const mld_polyveck *h, - const unsigned int number_of_hints) + const int number_of_hints) __contract__( requires(memory_no_alias(sig, CRYPTO_BYTES)) requires(memory_no_alias(c, MLDSA_CTILDEBYTES)) @@ -100,7 +99,7 @@ __contract__( array_bound(z->vec[k0].coeffs, 0, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1))) requires(forall(k1, 0, MLDSA_K, array_bound(h->vec[k1].coeffs, 0, MLDSA_N, 0, 2))) - requires(number_of_hints <= MLDSA_OMEGA) + requires(number_of_hints >= 0 && number_of_hints <= MLDSA_OMEGA) assigns(memory_slice(sig, CRYPTO_BYTES)) ); diff --git a/mldsa/src/poly.c b/mldsa/src/poly.c index e60fc306b..014c56c9f 100644 --- a/mldsa/src/poly.c +++ b/mldsa/src/poly.c @@ -35,12 +35,12 @@ MLD_INTERNAL_API void mld_poly_reduce(mld_poly *a) { - unsigned int i; + int i; mld_assert_bound(a->coeffs, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX); for (i = 0; i < MLDSA_N; ++i) __loop__( - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(forall(k0, i, MLDSA_N, 
a->coeffs[k0] == loop_entry(*a).coeffs[k0])) invariant(array_bound(a->coeffs, 0, i, -REDUCE32_RANGE_MAX, REDUCE32_RANGE_MAX))) { @@ -58,12 +58,12 @@ __contract__( ensures(array_bound(a->coeffs, 0, MLDSA_N, 0, MLDSA_Q)) ) { - unsigned int i; + int i; mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_N; ++i) __loop__( - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(forall(k0, i, MLDSA_N, a->coeffs[k0] == loop_entry(*a).coeffs[k0])) invariant(array_bound(a->coeffs, 0, i, 0, MLDSA_Q)) ) @@ -95,11 +95,11 @@ void mld_poly_caddq(mld_poly *a) MLD_INTERNAL_API void mld_poly_add(mld_poly *r, const mld_poly *b) { - unsigned int i; + int i; for (i = 0; i < MLDSA_N; ++i) __loop__( assigns(i, memory_slice(r, sizeof(mld_poly))) - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(forall(k0, i, MLDSA_N, r->coeffs[k0] == loop_entry(*r).coeffs[k0])) invariant(forall(k1, 0, i, r->coeffs[k1] == loop_entry(*r).coeffs[k1] + b->coeffs[k1])) invariant(forall(k2, 0, i, r->coeffs[k2] < REDUCE32_DOMAIN_MAX)) @@ -115,13 +115,13 @@ void mld_poly_add(mld_poly *r, const mld_poly *b) MLD_INTERNAL_API void mld_poly_sub(mld_poly *r, const mld_poly *b) { - unsigned int i; + int i; mld_assert_abs_bound(b->coeffs, MLDSA_N, MLDSA_Q); mld_assert_abs_bound(r->coeffs, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_N; ++i) __loop__( - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(array_bound(r->coeffs, 0, i, INT32_MIN, REDUCE32_DOMAIN_MAX)) invariant(forall(k0, i, MLDSA_N, r->coeffs[k0] == loop_entry(*r).coeffs[k0])) ) @@ -135,12 +135,12 @@ void mld_poly_sub(mld_poly *r, const mld_poly *b) MLD_INTERNAL_API void mld_poly_shiftl(mld_poly *a) { - unsigned int i; + int i; mld_assert_bound(a->coeffs, MLDSA_N, 0, 1 << 10); for (i = 0; i < MLDSA_N; i++) __loop__( - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(array_bound(a->coeffs, 0, i, 0, MLDSA_Q)) invariant(forall(k0, i, MLDSA_N, a->coeffs[k0] == 
loop_entry(*a).coeffs[k0]))) { @@ -224,13 +224,14 @@ __contract__( ensures(array_abs_bound(c->coeffs, 0, MLDSA_N, MLDSA_Q)) ) { - unsigned int i; + int i; + mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND); mld_assert_abs_bound(b->coeffs, MLDSA_N, MLD_NTT_BOUND); for (i = 0; i < MLDSA_N; ++i) __loop__( - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(array_abs_bound(c->coeffs, 0, i, MLDSA_Q)) ) { @@ -261,13 +262,13 @@ void mld_poly_pointwise_montgomery(mld_poly *c, const mld_poly *a, MLD_INTERNAL_API void mld_poly_power2round(mld_poly *a1, mld_poly *a0, const mld_poly *a) { - unsigned int i; + int i; mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q); for (i = 0; i < MLDSA_N; ++i) __loop__( assigns(i, memory_slice(a0, sizeof(mld_poly)), memory_slice(a1, sizeof(mld_poly))) - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(array_bound(a0->coeffs, 0, i, -(MLD_2_POW_D/2)+1, (MLD_2_POW_D/2)+1)) invariant(array_bound(a1->coeffs, 0, i, 0, ((MLDSA_Q - 1) / MLD_2_POW_D) + 1)) ) @@ -287,23 +288,21 @@ void mld_poly_power2round(mld_poly *a1, mld_poly *a0, const mld_poly *a) * in that it adds the offset and always expects the base of the * target buffer. This avoids shifting the buffer base in the * caller, which appears tricky to reason about. 
*/ -MLD_STATIC_TESTABLE unsigned int mld_rej_uniform_c(int32_t *a, - unsigned int target, - unsigned int offset, - const uint8_t *buf, - unsigned int buflen) +MLD_STATIC_TESTABLE int mld_rej_uniform_c(int32_t *a, int target, int offset, + const uint8_t *buf, int buflen) __contract__( - requires(offset <= target && target <= MLDSA_N) - requires(buflen <= (POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES) && buflen % 3 == 0) + requires(target >= 0 && target <= MLDSA_N) + requires(offset >= 0 && offset <= target) + requires(buflen >= 0 && buflen <= (POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES) && buflen % 3 == 0) requires(memory_no_alias(a, sizeof(int32_t) * target)) requires(memory_no_alias(buf, buflen)) requires(array_bound(a, 0, offset, 0, MLDSA_Q)) assigns(memory_slice(a, sizeof(int32_t) * target)) - ensures(offset <= return_value && return_value <= target) + ensures(0 <= return_value && offset <= return_value && return_value <= target) ensures(array_bound(a, 0, return_value, 0, MLDSA_Q)) ) { - unsigned int ctr, pos; + int ctr, pos; uint32_t t; mld_assert_bound(a, offset, 0, MLDSA_Q); @@ -313,7 +312,9 @@ __contract__( buflen <= (POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES) */ while (ctr < target && pos + 3 <= buflen) __loop__( - invariant(offset <= ctr && ctr <= target && pos <= buflen) + invariant(offset >= 0 && offset <= ctr) + invariant(ctr >= 0 && ctr <= target) + invariant(pos >= 0 && pos <= buflen) invariant(array_bound(a, 0, ctr, 0, MLDSA_Q))) { t = buf[pos++]; @@ -354,11 +355,12 @@ __contract__( * in that it adds the offset and always expects the base of the * target buffer. This avoids shifting the buffer base in the * caller, which appears tricky to reason about. 
*/ -static unsigned int mld_rej_uniform(int32_t *a, unsigned int target, - unsigned int offset, const uint8_t *buf, - unsigned int buflen) +static int mld_rej_uniform(int32_t *a, int target, int offset, + const uint8_t *buf, int buflen) __contract__( + requires(offset >= 0) requires(offset <= target && target <= MLDSA_N) + requires(buflen >= 0) requires(buflen <= (POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES) && buflen % 3 == 0) requires(memory_no_alias(a, sizeof(int32_t) * target)) requires(memory_no_alias(buf, buflen)) @@ -377,9 +379,8 @@ __contract__( ret = mld_rej_uniform_native(a, target, buf, buflen); if (ret != MLD_NATIVE_FUNC_FALLBACK) { - unsigned res = (unsigned)ret; - mld_assert_bound(a, res, 0, MLDSA_Q); - return res; + mld_assert_bound(a, ret, 0, MLDSA_Q); + return ret; } } #endif /* MLD_USE_NATIVE_REJ_UNIFORM */ @@ -398,8 +399,8 @@ __contract__( MLD_INTERNAL_API void mld_poly_uniform(mld_poly *a, const uint8_t seed[MLDSA_SEEDBYTES + 2]) { - unsigned int ctr; - unsigned int buflen = POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES; + int ctr; + int buflen = POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES; MLD_ALIGN uint8_t buf[POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES]; mld_xof128_ctx state; @@ -412,9 +413,9 @@ void mld_poly_uniform(mld_poly *a, const uint8_t seed[MLDSA_SEEDBYTES + 2]) while (ctr < MLDSA_N) __loop__( assigns(ctr, state, memory_slice(a, sizeof(mld_poly)), object_whole(buf)) - invariant(ctr <= MLDSA_N) + invariant(ctr >= 0 && ctr <= MLDSA_N) invariant(array_bound(a->coeffs, 0, ctr, 0, MLDSA_Q)) - invariant(state.pos <= SHAKE128_RATE) + invariant(state.pos >= 0 && state.pos <= SHAKE128_RATE) ) { mld_xof128_squeezeblocks(buf, 1, &state); @@ -438,9 +439,9 @@ void mld_poly_uniform_4x(mld_poly *vec0, mld_poly *vec1, mld_poly *vec2, buf[4][MLD_ALIGN_UP(POLY_UNIFORM_NBLOCKS * STREAM128_BLOCKBYTES)]; /* Tracks the number of coefficients we have already sampled */ - unsigned ctr[4]; + int ctr[4]; mld_xof128_x4_ctx state; - unsigned buflen; + int buflen; 
mld_xof128_x4_init(&state); mld_xof128_x4_absorb(&state, seed, MLDSA_SEEDBYTES + 2); @@ -468,8 +469,10 @@ void mld_poly_uniform_4x(mld_poly *vec0, mld_poly *vec1, mld_poly *vec2, assigns(ctr, state, object_whole(buf), memory_slice(vec0, sizeof(mld_poly)), memory_slice(vec1, sizeof(mld_poly)), memory_slice(vec2, sizeof(mld_poly)), memory_slice(vec3, sizeof(mld_poly))) - invariant(ctr[0] <= MLDSA_N && ctr[1] <= MLDSA_N) - invariant(ctr[2] <= MLDSA_N && ctr[3] <= MLDSA_N) + invariant(ctr[0] >= 0 && ctr[0] <= MLDSA_N) + invariant(ctr[1] >= 0 && ctr[1] <= MLDSA_N) + invariant(ctr[2] >= 0 && ctr[2] <= MLDSA_N) + invariant(ctr[3] >= 0 && ctr[3] <= MLDSA_N) invariant(array_bound(vec0->coeffs, 0, ctr[0], 0, MLDSA_Q)) invariant(array_bound(vec1->coeffs, 0, ctr[1], 0, MLDSA_Q)) invariant(array_bound(vec2->coeffs, 0, ctr[2], 0, MLDSA_Q)) @@ -497,12 +500,12 @@ void mld_poly_uniform_4x(mld_poly *vec0, mld_poly *vec1, mld_poly *vec2, MLD_INTERNAL_API void mld_polyt1_pack(uint8_t r[MLDSA_POLYT1_PACKEDBYTES], const mld_poly *a) { - unsigned int i; + int i; mld_assert_bound(a->coeffs, MLDSA_N, 0, 1 << 10); for (i = 0; i < MLDSA_N / 4; ++i) __loop__( - invariant(i <= MLDSA_N/4)) + invariant(i >= 0 && i <= MLDSA_N/4)) { r[5 * i + 0] = (uint8_t)((a->coeffs[4 * i + 0] >> 0) & 0xFF); r[5 * i + 1] = @@ -521,11 +524,11 @@ void mld_polyt1_pack(uint8_t r[MLDSA_POLYT1_PACKEDBYTES], const mld_poly *a) MLD_INTERNAL_API void mld_polyt1_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYT1_PACKEDBYTES]) { - unsigned int i; + int i; for (i = 0; i < MLDSA_N / 4; ++i) __loop__( - invariant(i <= MLDSA_N/4) + invariant(i >= 0 && i <= MLDSA_N/4) invariant(array_bound(r->coeffs, 0, i*4, 0, 1 << 10))) { r->coeffs[4 * i + 0] = @@ -544,7 +547,7 @@ void mld_polyt1_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYT1_PACKEDBYTES]) MLD_INTERNAL_API void mld_polyt0_pack(uint8_t r[MLDSA_POLYT0_PACKEDBYTES], const mld_poly *a) { - unsigned int i; + int i; uint32_t t[8]; mld_assert_bound(a->coeffs, MLDSA_N, -(1 << 
(MLDSA_D - 1)) + 1, @@ -552,7 +555,7 @@ void mld_polyt0_pack(uint8_t r[MLDSA_POLYT0_PACKEDBYTES], const mld_poly *a) for (i = 0; i < MLDSA_N / 8; ++i) __loop__( - invariant(i <= MLDSA_N/8)) + invariant(i >= 0 && i <= MLDSA_N/8)) { /* Safety: a->coeffs[i] <= (1 << (MLDSA_D - 1) as they are output of * power2round, hence, these casts are safe. */ @@ -591,11 +594,11 @@ void mld_polyt0_pack(uint8_t r[MLDSA_POLYT0_PACKEDBYTES], const mld_poly *a) MLD_INTERNAL_API void mld_polyt0_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYT0_PACKEDBYTES]) { - unsigned int i; + int i; for (i = 0; i < MLDSA_N / 8; ++i) __loop__( - invariant(i <= MLDSA_N/8) + invariant(i >= 0 && i <= MLDSA_N/8) invariant(array_bound(r->coeffs, 0, i*8, -(1<<(MLDSA_D-1)) + 1, (1<<(MLDSA_D-1)) + 1))) { r->coeffs[8 * i + 0] = a[13 * i + 0]; @@ -657,12 +660,12 @@ __contract__( ensures((return_value == 0) == array_abs_bound(a->coeffs, 0, MLDSA_N, B)) ) { - unsigned int i; + int i; uint32_t t = 0; mld_assert_bound(a->coeffs, MLDSA_N, -REDUCE32_RANGE_MAX, REDUCE32_RANGE_MAX); for (i = 0; i < MLDSA_N; ++i) __loop__( - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(t == 0 || t == 0xFFFFFFFF) invariant((t == 0) == array_abs_bound(a->coeffs, 0, i, B)) ) diff --git a/mldsa/src/poly_kl.c b/mldsa/src/poly_kl.c index ba7ba25b2..d988fbb60 100644 --- a/mldsa/src/poly_kl.c +++ b/mldsa/src/poly_kl.c @@ -52,12 +52,12 @@ __contract__( ensures(array_abs_bound(a0->coeffs, 0, MLDSA_N, MLDSA_GAMMA2+1)) ) { - unsigned int i; + int i; mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q); for (i = 0; i < MLDSA_N; ++i) __loop__( assigns(i, memory_slice(a0, sizeof(mld_poly)), memory_slice(a1, sizeof(mld_poly))) - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(array_bound(a1->coeffs, 0, i, 0, (MLDSA_Q-1)/(2*MLDSA_GAMMA2))) invariant(array_abs_bound(a0->coeffs, 0, i, MLDSA_GAMMA2+1)) ) @@ -104,19 +104,19 @@ void mld_poly_decompose(mld_poly *a1, mld_poly *a0, const mld_poly *a) } MLD_INTERNAL_API 
-unsigned int mld_poly_make_hint(mld_poly *h, const mld_poly *a0, - const mld_poly *a1) +int mld_poly_make_hint(mld_poly *h, const mld_poly *a0, const mld_poly *a1) { - unsigned int i, s = 0; + int i; + int s = 0; for (i = 0; i < MLDSA_N; ++i) __loop__( - invariant(i <= MLDSA_N) - invariant(s <= i) + invariant(i >= 0 && i <= MLDSA_N) + invariant(s >= 0 && s <= i) invariant(array_bound(h->coeffs, 0, i, 0, 2)) ) { - const unsigned int hint_bit = mld_make_hint(a0->coeffs[i], a1->coeffs[i]); + const int hint_bit = mld_make_hint(a0->coeffs[i], a1->coeffs[i]); h->coeffs[i] = (int32_t)hint_bit; s += hint_bit; } @@ -138,13 +138,13 @@ __contract__( ensures(array_bound(b->coeffs, 0, MLDSA_N, 0, (MLDSA_Q-1)/(2*MLDSA_GAMMA2))) ) { - unsigned int i; + int i; mld_assert_bound(a->coeffs, MLDSA_N, 0, MLDSA_Q); mld_assert_bound(h->coeffs, MLDSA_N, 0, 2); for (i = 0; i < MLDSA_N; ++i) __loop__( - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(array_bound(b->coeffs, 0, i, 0, (MLDSA_Q-1)/(2*MLDSA_GAMMA2))) ) { @@ -189,17 +189,16 @@ void mld_poly_use_hint(mld_poly *b, const mld_poly *a, const mld_poly *h) * Name: mld_rej_eta * * Description: Sample uniformly random coefficients in [-MLDSA_ETA, MLDSA_ETA] - *by performing rejection sampling on array of random bytes. + * by performing rejection sampling on array of random bytes. 
* - * Arguments: - int32_t *a: pointer to output array (allocated) - * - unsigned int target: requested number of coefficients to - *sample - * - unsigned int offset: number of coefficients already sampled - * - const uint8_t *buf: array of random bytes to sample from - * - unsigned int buflen: length of array of random bytes + * Arguments: - int32_t *a: pointer to output array (allocated) + * - int target: requested number of coefficients to sample + * - int offset: number of coefficients already sampled + * - const uint8_t *buf: array of random bytes to sample from + * - int buflen: length of array of random bytes * * Returns number of sampled coefficients. Can be smaller than target if not - *enough random bytes were given. + * enough random bytes were given. **************************************************/ /* Reference: `mld_rej_eta()` in the reference implementation @[REF]. @@ -226,22 +225,21 @@ void mld_poly_use_hint(mld_poly *b, const mld_poly *a, const mld_poly *h) #error "Invalid value of MLDSA_ETA" #endif /* MLDSA_ETA != 2 && MLDSA_ETA != 4 */ -MLD_STATIC_TESTABLE unsigned int mld_rej_eta_c(int32_t *a, unsigned int target, - unsigned int offset, - const uint8_t *buf, - unsigned int buflen) +MLD_STATIC_TESTABLE int mld_rej_eta_c(int32_t *a, int target, int offset, + const uint8_t *buf, int buflen) __contract__( - requires(offset <= target && target <= MLDSA_N) - requires(buflen <= (POLY_UNIFORM_ETA_NBLOCKS * STREAM256_BLOCKBYTES)) + requires(offset >= 0 && offset <= target) + requires(target >= 0 && target <= MLDSA_N) + requires(buflen >= 0 && buflen <= (POLY_UNIFORM_ETA_NBLOCKS * STREAM256_BLOCKBYTES)) requires(memory_no_alias(a, sizeof(int32_t) * target)) requires(memory_no_alias(buf, buflen)) requires(array_abs_bound(a, 0, offset, MLDSA_ETA + 1)) assigns(memory_slice(a, sizeof(int32_t) * target)) - ensures(offset <= return_value && return_value <= target) + ensures(0 <= return_value && offset <= return_value && return_value <= target) 
ensures(array_abs_bound(a, 0, return_value, MLDSA_ETA + 1)) ) { - unsigned int ctr, pos; + int ctr, pos; int t_valid; uint32_t t0, t1; mld_assert_abs_bound(a, offset, MLDSA_ETA + 1); @@ -249,7 +247,9 @@ __contract__( pos = 0; while (ctr < target && pos < buflen) __loop__( - invariant(offset <= ctr && ctr <= target && pos <= buflen) + invariant(offset >= 0 && offset <= ctr) + invariant(ctr >= 0 && ctr <= target) + invariant(pos >= 0 && pos <= buflen) invariant(array_abs_bound(a, 0, ctr, MLDSA_ETA + 1)) ) { @@ -299,11 +299,12 @@ __contract__( return ctr; } -static unsigned int mld_rej_eta(int32_t *a, unsigned int target, - unsigned int offset, const uint8_t *buf, - unsigned int buflen) +static int mld_rej_eta(int32_t *a, int target, int offset, const uint8_t *buf, + int buflen) __contract__( + requires(offset >= 0) requires(offset <= target && target <= MLDSA_N) + requires(buflen >= 0) requires(buflen <= (POLY_UNIFORM_ETA_NBLOCKS * STREAM256_BLOCKBYTES)) requires(memory_no_alias(a, sizeof(int32_t) * target)) requires(memory_no_alias(buf, buflen)) @@ -322,9 +323,8 @@ __contract__( ret = mld_rej_uniform_eta2_native(a, target, buf, buflen); if (ret != MLD_NATIVE_FUNC_FALLBACK) { - unsigned res = (unsigned)ret; - mld_assert_abs_bound(a, res, MLDSA_ETA + 1); - return res; + mld_assert_abs_bound(a, ret, MLDSA_ETA + 1); + return ret; } } /* TODO: CBMC proof based on mld_rej_uniform_eta4_native */ @@ -336,9 +336,8 @@ __contract__( ret = mld_rej_uniform_eta4_native(a, target, buf, buflen); if (ret != MLD_NATIVE_FUNC_FALLBACK) { - unsigned res = (unsigned)ret; - mld_assert_abs_bound(a, res, MLDSA_ETA + 1); - return res; + mld_assert_abs_bound(a, ret, MLDSA_ETA + 1); + return ret; } } #endif /* !(MLDSA_ETA == 2 && MLD_USE_NATIVE_REJ_UNIFORM_ETA2) && MLDSA_ETA == \ @@ -361,9 +360,9 @@ void mld_poly_uniform_eta_4x(mld_poly *r0, mld_poly *r1, mld_poly *r2, MLD_ALIGN uint8_t extseed[4][MLD_ALIGN_UP(MLDSA_CRHBYTES + 2)]; /* Tracks the number of coefficients we have already sampled 
*/ - unsigned ctr[4]; + int ctr[4]; mld_xof256_x4_ctx state; - unsigned buflen; + int buflen; mld_memcpy(extseed[0], seed, MLDSA_CRHBYTES); mld_memcpy(extseed[1], seed, MLDSA_CRHBYTES); @@ -406,8 +405,11 @@ void mld_poly_uniform_eta_4x(mld_poly *r0, mld_poly *r1, mld_poly *r2, memory_slice(r3, sizeof(mld_poly)), object_whole(buf[0]), object_whole(buf[1]), object_whole(buf[2]), object_whole(buf[3])) - invariant(ctr[0] <= MLDSA_N && ctr[1] <= MLDSA_N) - invariant(ctr[2] <= MLDSA_N && ctr[3] <= MLDSA_N) + + invariant(ctr[0] >= 0 && ctr[0] <= MLDSA_N) + invariant(ctr[1] >= 0 && ctr[1] <= MLDSA_N) + invariant(ctr[2] >= 0 && ctr[2] <= MLDSA_N) + invariant(ctr[3] >= 0 && ctr[3] <= MLDSA_N) invariant(array_abs_bound(r0->coeffs, 0, ctr[0], MLDSA_ETA + 1)) invariant(array_abs_bound(r1->coeffs, 0, ctr[1], MLDSA_ETA + 1)) invariant(array_abs_bound(r2->coeffs, 0, ctr[2], MLDSA_ETA + 1)) @@ -442,9 +444,8 @@ void mld_poly_uniform_eta(mld_poly *r, const uint8_t seed[MLDSA_CRHBYTES], MLD_ALIGN uint8_t extseed[MLDSA_CRHBYTES + 2]; /* Tracks the number of coefficients we have already sampled */ - unsigned ctr; + int ctr, buflen; mld_xof256_ctx state; - unsigned buflen; mld_memcpy(extseed, seed, MLDSA_CRHBYTES); extseed[MLDSA_CRHBYTES] = nonce; @@ -471,8 +472,8 @@ void mld_poly_uniform_eta(mld_poly *r, const uint8_t seed[MLDSA_CRHBYTES], __loop__( assigns(ctr, object_whole(&state), object_whole(buf), memory_slice(r, sizeof(mld_poly))) - invariant(ctr <= MLDSA_N) - invariant(state.pos <= SHAKE256_RATE) + invariant(ctr >= 0 && ctr <= MLDSA_N) + invariant(state.pos >= 0 && state.pos <= SHAKE256_RATE) invariant(array_abs_bound(r->coeffs, 0, ctr, MLDSA_ETA + 1))) { mld_xof256_squeezeblocks(buf, 1, &state); @@ -576,7 +577,8 @@ void mld_poly_uniform_gamma1_4x(mld_poly *r0, mld_poly *r1, mld_poly *r2, MLD_INTERNAL_API void mld_poly_challenge(mld_poly *c, const uint8_t seed[MLDSA_CTILDEBYTES]) { - unsigned int i, j, pos; + int i, pos; + int j = 0; uint64_t signs; uint64_t offset; MLD_ALIGN 
uint8_t buf[SHAKE256_RATE]; @@ -593,7 +595,7 @@ void mld_poly_challenge(mld_poly *c, const uint8_t seed[MLDSA_CTILDEBYTES]) for (i = 0; i < 8; ++i) __loop__( assigns(i, signs) - invariant(i <= 8) + invariant(i >= 0 && i <= 8) ) { signs |= (uint64_t)buf[i] << 8 * i; @@ -605,18 +607,19 @@ void mld_poly_challenge(mld_poly *c, const uint8_t seed[MLDSA_CTILDEBYTES]) for (i = MLDSA_N - MLDSA_TAU; i < MLDSA_N; ++i) __loop__( assigns(i, j, object_whole(buf), state, pos, memory_slice(c, sizeof(mld_poly)), signs) - invariant(i >= MLDSA_N - MLDSA_TAU) - invariant(i <= MLDSA_N) - invariant(pos >= 1) - invariant(pos <= SHAKE256_RATE) + invariant(i >= MLDSA_N - MLDSA_TAU && i <= MLDSA_N) + invariant(j >= 0 && j <= 255) + invariant(pos >= 1 && pos <= SHAKE256_RATE) invariant(array_bound(c->coeffs, 0, MLDSA_N, -1, 2)) - invariant(state.pos <= SHAKE256_RATE) + invariant(state.pos >= 0 && state.pos <= SHAKE256_RATE) ) { do __loop__( assigns(j, object_whole(buf), state, pos) - invariant(state.pos <= SHAKE256_RATE) + invariant(j >= 0 && j <= 255) + invariant(pos >= 1 && pos <= SHAKE256_RATE) + invariant(state.pos >= 0 && state.pos <= SHAKE256_RATE) ) { if (pos >= SHAKE256_RATE) @@ -624,7 +627,7 @@ void mld_poly_challenge(mld_poly *c, const uint8_t seed[MLDSA_CTILDEBYTES]) mld_shake256_squeeze(buf, SHAKE256_RATE, &state); pos = 0; } - j = buf[pos++]; + j = (int)buf[pos++]; } while (j > i); c->coeffs[i] = c->coeffs[j]; @@ -656,7 +659,7 @@ void mld_poly_challenge(mld_poly *c, const uint8_t seed[MLDSA_CTILDEBYTES]) MLD_INTERNAL_API void mld_polyeta_pack(uint8_t r[MLDSA_POLYETA_PACKEDBYTES], const mld_poly *a) { - unsigned int i; + int i; uint8_t t[8]; mld_assert_abs_bound(a->coeffs, MLDSA_N, MLDSA_ETA + 1); @@ -664,7 +667,7 @@ void mld_polyeta_pack(uint8_t r[MLDSA_POLYETA_PACKEDBYTES], const mld_poly *a) #if MLDSA_ETA == 2 for (i = 0; i < MLDSA_N / 8; ++i) __loop__( - invariant(i <= MLDSA_N/8)) + invariant(i >= 0 && i <= MLDSA_N/8)) { /* The casts are safe since we assume that the 
coefficients * of a are <= MLDSA_ETA in absolute value. */ @@ -686,7 +689,7 @@ void mld_polyeta_pack(uint8_t r[MLDSA_POLYETA_PACKEDBYTES], const mld_poly *a) #elif MLDSA_ETA == 4 for (i = 0; i < MLDSA_N / 2; ++i) __loop__( - invariant(i <= MLDSA_N/2)) + invariant(i >= 0 && i <= MLDSA_N/2)) { /* The casts are safe since we assume that the coefficients * of a are <= MLDSA_ETA in absolute value. */ @@ -701,12 +704,12 @@ void mld_polyeta_pack(uint8_t r[MLDSA_POLYETA_PACKEDBYTES], const mld_poly *a) void mld_polyeta_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYETA_PACKEDBYTES]) { - unsigned int i; + int i; #if MLDSA_ETA == 2 for (i = 0; i < MLDSA_N / 8; ++i) __loop__( - invariant(i <= MLDSA_N/8) + invariant(i >= 0 && i <= MLDSA_N/8) invariant(array_bound(r->coeffs, 0, i*8, -5, MLDSA_ETA + 1))) { r->coeffs[8 * i + 0] = (a[3 * i + 0] >> 0) & 7; @@ -730,7 +733,7 @@ void mld_polyeta_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYETA_PACKEDBYTES]) #elif MLDSA_ETA == 4 for (i = 0; i < MLDSA_N / 2; ++i) __loop__( - invariant(i <= MLDSA_N/2) + invariant(i >= 0 && i <= MLDSA_N/2) invariant(array_bound(r->coeffs, 0, i*2, -11, MLDSA_ETA + 1))) { r->coeffs[2 * i + 0] = a[i] & 0x0F; @@ -750,7 +753,7 @@ void mld_polyeta_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYETA_PACKEDBYTES]) MLD_INTERNAL_API void mld_polyz_pack(uint8_t r[MLDSA_POLYZ_PACKEDBYTES], const mld_poly *a) { - unsigned int i; + int i; uint32_t t[4]; mld_assert_bound(a->coeffs, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1); @@ -758,7 +761,7 @@ void mld_polyz_pack(uint8_t r[MLDSA_POLYZ_PACKEDBYTES], const mld_poly *a) #if MLD_CONFIG_PARAMETER_SET == 44 for (i = 0; i < MLDSA_N / 4; ++i) __loop__( - invariant(i <= MLDSA_N/4)) + invariant(i >= 0 && i <= MLDSA_N/4)) { /* Safety: a->coeffs[i] <= MLDSA_GAMMA1, hence, these casts are safe. 
*/ t[0] = (uint32_t)(MLDSA_GAMMA1 - a->coeffs[4 * i + 0]); @@ -782,7 +785,7 @@ void mld_polyz_pack(uint8_t r[MLDSA_POLYZ_PACKEDBYTES], const mld_poly *a) #else /* MLD_CONFIG_PARAMETER_SET == 44 */ for (i = 0; i < MLDSA_N / 2; ++i) __loop__( - invariant(i <= MLDSA_N/2)) + invariant(i >= 0 && i <= MLDSA_N/2)) { /* Safety: a->coeffs[i] <= MLDSA_GAMMA1, hence, these casts are safe. */ t[0] = (uint32_t)(MLDSA_GAMMA1 - a->coeffs[2 * i + 0]); @@ -807,11 +810,11 @@ __contract__( ensures(array_bound(r->coeffs, 0, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1)) ) { - unsigned int i; + int i; #if MLD_CONFIG_PARAMETER_SET == 44 for (i = 0; i < MLDSA_N / 4; ++i) __loop__( - invariant(i <= MLDSA_N/4) + invariant(i >= 0 && i <= MLDSA_N/4) invariant(array_bound(r->coeffs, 0, i*4, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1))) { r->coeffs[4 * i + 0] = a[9 * i + 0]; @@ -842,7 +845,7 @@ __contract__( #else /* MLD_CONFIG_PARAMETER_SET == 44 */ for (i = 0; i < MLDSA_N / 2; ++i) __loop__( - invariant(i <= MLDSA_N/2) + invariant(i >= 0 && i <= MLDSA_N/2) invariant(array_bound(r->coeffs, 0, i*2, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1))) { r->coeffs[2 * i + 0] = a[5 * i + 0]; @@ -895,14 +898,14 @@ void mld_polyz_unpack(mld_poly *r, const uint8_t a[MLDSA_POLYZ_PACKEDBYTES]) MLD_INTERNAL_API void mld_polyw1_pack(uint8_t r[MLDSA_POLYW1_PACKEDBYTES], const mld_poly *a) { - unsigned int i; + int i; mld_assert_bound(a->coeffs, MLDSA_N, 0, (MLDSA_Q - 1) / (2 * MLDSA_GAMMA2)); #if MLD_CONFIG_PARAMETER_SET == 44 for (i = 0; i < MLDSA_N / 4; ++i) __loop__( - invariant(i <= MLDSA_N/4)) + invariant(i >= 0 && i <= MLDSA_N/4)) { r[3 * i + 0] = (uint8_t)((a->coeffs[4 * i + 0]) & 0xFF); r[3 * i + 0] |= (uint8_t)((a->coeffs[4 * i + 1] << 6) & 0xFF); @@ -914,7 +917,7 @@ void mld_polyw1_pack(uint8_t r[MLDSA_POLYW1_PACKEDBYTES], const mld_poly *a) #else /* MLD_CONFIG_PARAMETER_SET == 44 */ for (i = 0; i < MLDSA_N / 2; ++i) __loop__( - invariant(i <= MLDSA_N/2)) + invariant(i >= 0 && i <= MLDSA_N/2)) { r[i] = 
(uint8_t)((a->coeffs[2 * i + 0] | (a->coeffs[2 * i + 1] << 4)) & 0xFF); diff --git a/mldsa/src/poly_kl.h b/mldsa/src/poly_kl.h index 23c525efe..3f56408e3 100644 --- a/mldsa/src/poly_kl.h +++ b/mldsa/src/poly_kl.h @@ -55,14 +55,13 @@ __contract__( * Returns number of 1 bits. **************************************************/ MLD_INTERNAL_API -unsigned int mld_poly_make_hint(mld_poly *h, const mld_poly *a0, - const mld_poly *a1) +int mld_poly_make_hint(mld_poly *h, const mld_poly *a0, const mld_poly *a1) __contract__( requires(memory_no_alias(h, sizeof(mld_poly))) requires(memory_no_alias(a0, sizeof(mld_poly))) requires(memory_no_alias(a1, sizeof(mld_poly))) assigns(memory_slice(h, sizeof(mld_poly))) - ensures(return_value <= MLDSA_N) + ensures(return_value >= 0 && return_value <= MLDSA_N) ensures(array_bound(h->coeffs, 0, MLDSA_N, 0, 2)) ); diff --git a/mldsa/src/polyvec.c b/mldsa/src/polyvec.c index 975b35b3e..bffc9e885 100644 --- a/mldsa/src/polyvec.c +++ b/mldsa/src/polyvec.c @@ -49,7 +49,7 @@ __contract__( { #if defined(MLD_USE_NATIVE_NTT_CUSTOM_ORDER) /* TODO: proof */ - unsigned int i, j; + int i, j; for (i = 0; i < MLDSA_K; i++) { for (j = 0; j < MLDSA_L; j++) @@ -71,7 +71,7 @@ MLD_INTERNAL_API void mld_polyvec_matrix_expand(mld_polymat *mat, const uint8_t rho[MLDSA_SEEDBYTES]) { - unsigned int i, j; + int i, j; /* * We generate four separate seed arrays rather than a single one to work * around limitations in CBMC function contracts dealing with disjoint slices @@ -83,7 +83,7 @@ void mld_polyvec_matrix_expand(mld_polymat *mat, for (j = 0; j < 4; j++) __loop__( assigns(j, object_whole(seed_ext)) - invariant(j <= 4) + invariant(j >= 0 && j <= 4) ) { mld_memcpy(seed_ext[j], rho, MLDSA_SEEDBYTES); @@ -94,7 +94,7 @@ void mld_polyvec_matrix_expand(mld_polymat *mat, for (i = 0; i < (MLDSA_K * MLDSA_L / 4) * 4; i += 4) __loop__( assigns(i, j, object_whole(seed_ext), memory_slice(mat, sizeof(mld_polymat))) - invariant(i <= (MLDSA_K * MLDSA_L / 4) * 4 && i % 4 == 0) 
+ invariant(i >= 0 && i <= (MLDSA_K * MLDSA_L / 4) * 4 && i % 4 == 0) /* vectors 0 .. i / MLDSA_L are completely sampled */ invariant(forall(k1, 0, i / MLDSA_L, forall(l1, 0, MLDSA_L, array_bound(mat->vec[k1].vec[l1].coeffs, 0, MLDSA_N, 0, MLDSA_Q)))) @@ -106,7 +106,7 @@ void mld_polyvec_matrix_expand(mld_polymat *mat, for (j = 0; j < 4; j++) __loop__( assigns(j, object_whole(seed_ext)) - invariant(j <= 4) + invariant(j >= 0 && j <= 4) ) { uint8_t x = (uint8_t)((i + j) / MLDSA_L); @@ -130,7 +130,7 @@ void mld_polyvec_matrix_expand(mld_polymat *mat, while (i < MLDSA_K * MLDSA_L) __loop__( assigns(i, object_whole(seed_ext), memory_slice(mat, sizeof(mld_polymat))) - invariant(i <= MLDSA_K * MLDSA_L) + invariant(i >= 0 && i <= MLDSA_K * MLDSA_L) /* vectors 0 .. i / MLDSA_L are completely sampled */ invariant(forall(k1, 0, i / MLDSA_L, forall(l1, 0, MLDSA_L, array_bound(mat->vec[k1].vec[l1].coeffs, 0, MLDSA_N, 0, MLDSA_Q)))) @@ -161,13 +161,13 @@ void mld_polyvec_matrix_pointwise_montgomery(mld_polyveck *t, const mld_polymat *mat, const mld_polyvecl *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_NTT_BOUND); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(t, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k0, 0, i, array_abs_bound(t->vec[k0].coeffs, 0, MLDSA_N, MLDSA_Q))) ) @@ -227,13 +227,13 @@ void mld_polyvecl_uniform_gamma1(mld_polyvecl *v, MLD_INTERNAL_API void mld_polyvecl_reduce(mld_polyvecl *v) { - unsigned int i; + int i; mld_assert_bound_2d(v->vec, MLDSA_L, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX); for (i = 0; i < MLDSA_L; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyvecl))) - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) invariant(forall(k0, i, MLDSA_L, forall(k1, 0, MLDSA_N, v->vec[k0].coeffs[k1] == loop_entry(*v).vec[k0].coeffs[k1]))) invariant(forall(k2, 0, i, array_bound(v->vec[k2].coeffs, 0, MLDSA_N, -REDUCE32_RANGE_MAX, 
REDUCE32_RANGE_MAX)))) @@ -250,15 +250,15 @@ void mld_polyvecl_reduce(mld_polyvecl *v) MLD_INTERNAL_API void mld_polyvecl_add(mld_polyvecl *u, const mld_polyvecl *v) { - unsigned int i; + int i; - for (i = 0; i < MLDSA_L; ++i) + for (i = 0; i < MLDSA_L; i++) __loop__( assigns(i, memory_slice(u, sizeof(mld_polyvecl))) - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) + invariant(forall(k6, 0, i, array_bound(u->vec[k6].coeffs, 0, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX))) invariant(forall(k0, i, MLDSA_L, forall(k1, 0, MLDSA_N, u->vec[k0].coeffs[k1] == loop_entry(*u).vec[k0].coeffs[k1]))) - invariant(forall(k6, 0, i, array_bound(u->vec[k6].coeffs, 0, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX))) ) { mld_poly_add(&u->vec[i], &v->vec[i]); @@ -269,13 +269,13 @@ void mld_polyvecl_add(mld_polyvecl *u, const mld_polyvecl *v) MLD_INTERNAL_API void mld_polyvecl_ntt(mld_polyvecl *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_L; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyvecl))) - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) invariant(forall(k0, i, MLDSA_L, forall(k1, 0, MLDSA_N, v->vec[k0].coeffs[k1] == loop_entry(*v).vec[k0].coeffs[k1]))) invariant(forall(k1, 0, i, array_abs_bound(v->vec[k1].coeffs, 0, MLDSA_N, MLD_NTT_BOUND)))) { @@ -288,13 +288,13 @@ void mld_polyvecl_ntt(mld_polyvecl *v) MLD_INTERNAL_API void mld_polyvecl_invntt_tomont(mld_polyvecl *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_L; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyvecl))) - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) invariant(forall(k0, i, MLDSA_L, forall(k1, 0, MLDSA_N, v->vec[k0].coeffs[k1] == loop_entry(*v).vec[k0].coeffs[k1]))) invariant(forall(k1, 0, i, array_abs_bound(v->vec[k1].coeffs, 0, MLDSA_N, MLD_INTT_BOUND)))) { @@ -308,14 +308,14 @@ MLD_INTERNAL_API void 
mld_polyvecl_pointwise_poly_montgomery(mld_polyvecl *r, const mld_poly *a, const mld_polyvecl *v) { - unsigned int i; + int i; mld_assert_abs_bound(a->coeffs, MLDSA_N, MLD_NTT_BOUND); mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_NTT_BOUND); for (i = 0; i < MLDSA_L; ++i) __loop__( assigns(i, memory_slice(r, sizeof(mld_polyvecl))) - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) invariant(forall(k2, 0, i, array_abs_bound(r->vec[k2].coeffs, 0, MLDSA_N, MLDSA_Q))) ) { @@ -339,13 +339,13 @@ __contract__( ensures(array_abs_bound(w->coeffs, 0, MLDSA_N, MLDSA_Q)) ) { - unsigned int i, j; + int i, j; mld_assert_bound_2d(u->vec, MLDSA_L, MLDSA_N, 0, MLDSA_Q); mld_assert_abs_bound_2d(v->vec, MLDSA_L, MLDSA_N, MLD_NTT_BOUND); for (i = 0; i < MLDSA_N; i++) __loop__( assigns(i, j, memory_slice(w, sizeof(mld_poly))) - invariant(i <= MLDSA_N) + invariant(i >= 0 && i <= MLDSA_N) invariant(array_abs_bound(w->coeffs, 0, i, MLDSA_Q)) ) { @@ -354,7 +354,7 @@ __contract__( for (j = 0; j < MLDSA_L; j++) __loop__( assigns(j, t) - invariant(j <= MLDSA_L) + invariant(j >= 0 && j <= MLDSA_L) invariant(t >= -(int64_t)j*(MLDSA_Q - 1)*(MLD_NTT_BOUND - 1)) invariant(t <= (int64_t)j*(MLDSA_Q - 1)*(MLD_NTT_BOUND - 1)) ) @@ -435,14 +435,14 @@ void mld_polyvecl_pointwise_acc_montgomery(mld_poly *w, const mld_polyvecl *u, MLD_INTERNAL_API uint32_t mld_polyvecl_chknorm(const mld_polyvecl *v, int32_t bound) { - unsigned int i; + int i; uint32_t t = 0; mld_assert_bound_2d(v->vec, MLDSA_L, MLDSA_N, -REDUCE32_RANGE_MAX, REDUCE32_RANGE_MAX); for (i = 0; i < MLDSA_L; ++i) __loop__( - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) invariant(t == 0 || t == 0xFFFFFFFF) invariant((t == 0) == forall(k1, 0, i, array_abs_bound(v->vec[k1].coeffs, 0, MLDSA_N, bound))) ) @@ -462,13 +462,13 @@ uint32_t mld_polyvecl_chknorm(const mld_polyvecl *v, int32_t bound) MLD_INTERNAL_API void mld_polyveck_reduce(mld_polyveck *v) { - unsigned int i; + int i; mld_assert_bound_2d(v->vec, MLDSA_K, 
MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k0, i, MLDSA_K, forall(k1, 0, MLDSA_N, v->vec[k0].coeffs[k1] == loop_entry(*v).vec[k0].coeffs[k1]))) invariant(forall(k2, 0, i, array_bound(v->vec[k2].coeffs, 0, MLDSA_N, -REDUCE32_RANGE_MAX, REDUCE32_RANGE_MAX))) @@ -484,13 +484,13 @@ void mld_polyveck_reduce(mld_polyveck *v) MLD_INTERNAL_API void mld_polyveck_caddq(mld_polyveck *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k0, i, MLDSA_K, forall(k1, 0, MLDSA_N, v->vec[k0].coeffs[k1] == loop_entry(*v).vec[k0].coeffs[k1]))) invariant(forall(k1, 0, i, array_bound(v->vec[k1].coeffs, 0, MLDSA_N, 0, MLDSA_Q)))) { @@ -505,12 +505,12 @@ void mld_polyveck_caddq(mld_polyveck *v) MLD_INTERNAL_API void mld_polyveck_add(mld_polyveck *u, const mld_polyveck *v) { - unsigned int i; + int i; for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(u, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k0, i, MLDSA_K, forall(k1, 0, MLDSA_N, u->vec[k0].coeffs[k1] == loop_entry(*u).vec[k0].coeffs[k1]))) invariant(forall(k6, 0, i, array_bound(u->vec[k6].coeffs, 0, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX))) @@ -524,14 +524,14 @@ void mld_polyveck_add(mld_polyveck *u, const mld_polyveck *v) MLD_INTERNAL_API void mld_polyveck_sub(mld_polyveck *u, const mld_polyveck *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(u->vec, MLDSA_K, MLDSA_N, MLDSA_Q); mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(u, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= 
MLDSA_K) invariant(forall(k0, 0, i, array_bound(u->vec[k0].coeffs, 0, MLDSA_N, INT32_MIN, REDUCE32_DOMAIN_MAX))) invariant(forall(k1, i, MLDSA_K, @@ -546,13 +546,13 @@ void mld_polyveck_sub(mld_polyveck *u, const mld_polyveck *v) MLD_INTERNAL_API void mld_polyveck_shiftl(mld_polyveck *v) { - unsigned int i; + int i; mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, 1 << 10); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k1, 0, i, array_bound(v->vec[k1].coeffs, 0, MLDSA_N, 0, MLDSA_Q))) invariant(forall(k1, i, MLDSA_K, forall(n1, 0, MLDSA_N, v->vec[k1].coeffs[n1] == loop_entry(*v).vec[k1].coeffs[n1]))) @@ -567,13 +567,13 @@ void mld_polyveck_shiftl(mld_polyveck *v) MLD_INTERNAL_API void mld_polyveck_ntt(mld_polyveck *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k0, i, MLDSA_K, forall(k1, 0, MLDSA_N, v->vec[k0].coeffs[k1] == loop_entry(*v).vec[k0].coeffs[k1]))) invariant(forall(k1, 0, i, array_abs_bound(v->vec[k1].coeffs, 0, MLDSA_N, MLD_NTT_BOUND)))) { @@ -585,13 +585,13 @@ void mld_polyveck_ntt(mld_polyveck *v) MLD_INTERNAL_API void mld_polyveck_invntt_tomont(mld_polyveck *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLDSA_Q); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(v, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k0, i, MLDSA_K, forall(k1, 0, MLDSA_N, v->vec[k0].coeffs[k1] == loop_entry(*v).vec[k0].coeffs[k1]))) invariant(forall(k1, 0, i, array_abs_bound(v->vec[k1].coeffs, 0, MLDSA_N, MLD_INTT_BOUND)))) { @@ -605,13 +605,13 @@ MLD_INTERNAL_API void mld_polyveck_pointwise_poly_montgomery(mld_polyveck *r, const mld_poly 
*a, const mld_polyveck *v) { - unsigned int i; + int i; mld_assert_abs_bound_2d(v->vec, MLDSA_K, MLDSA_N, MLD_NTT_BOUND); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(r, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k2, 0, i, array_abs_bound(r->vec[k2].coeffs, 0, MLDSA_N, MLDSA_Q))) ) { @@ -623,14 +623,14 @@ void mld_polyveck_pointwise_poly_montgomery(mld_polyveck *r, const mld_poly *a, MLD_INTERNAL_API uint32_t mld_polyveck_chknorm(const mld_polyveck *v, int32_t bound) { - unsigned int i; + int i; uint32_t t = 0; mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, -REDUCE32_RANGE_MAX, REDUCE32_RANGE_MAX); for (i = 0; i < MLDSA_K; ++i) __loop__( - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(t == 0 || t == 0xFFFFFFFF) invariant((t == 0) == forall(k1, 0, i, array_abs_bound(v->vec[k1].coeffs, 0, MLDSA_N, bound))) ) @@ -649,13 +649,13 @@ MLD_INTERNAL_API void mld_polyveck_power2round(mld_polyveck *v1, mld_polyveck *v0, const mld_polyveck *v) { - unsigned int i; + int i; mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(v0, sizeof(mld_polyveck)), memory_slice(v1, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k1, 0, i, array_bound(v0->vec[k1].coeffs, 0, MLDSA_N, -(MLD_2_POW_D/2)+1, (MLD_2_POW_D/2)+1))) invariant(forall(k2, 0, i, array_bound(v1->vec[k2].coeffs, 0, MLDSA_N, 0, ((MLDSA_Q - 1) / MLD_2_POW_D) + 1))) ) @@ -673,13 +673,13 @@ MLD_INTERNAL_API void mld_polyveck_decompose(mld_polyveck *v1, mld_polyveck *v0, const mld_polyveck *v) { - unsigned int i; + int i; mld_assert_bound_2d(v->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(v0, sizeof(mld_polyveck)), memory_slice(v1, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k1, 0, i, 
array_bound(v1->vec[k1].coeffs, 0, MLDSA_N, 0, (MLDSA_Q-1)/(2*MLDSA_GAMMA2)))) invariant(forall(k2, 0, i, @@ -695,16 +695,17 @@ void mld_polyveck_decompose(mld_polyveck *v1, mld_polyveck *v0, } MLD_INTERNAL_API -unsigned int mld_polyveck_make_hint(mld_polyveck *h, const mld_polyveck *v0, - const mld_polyveck *v1) +int mld_polyveck_make_hint(mld_polyveck *h, const mld_polyveck *v0, + const mld_polyveck *v1) { - unsigned int i, s = 0; + int s = 0; + int i; for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, s, memory_slice(h, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) - invariant(s <= i * MLDSA_N) + invariant(i >= 0 && i <= MLDSA_K) + invariant(s >= 0 && s <= i * MLDSA_N) invariant(forall(k1, 0, i, array_bound(h->vec[k1].coeffs, 0, MLDSA_N, 0, 2))) ) { @@ -719,14 +720,14 @@ MLD_INTERNAL_API void mld_polyveck_use_hint(mld_polyveck *w, const mld_polyveck *u, const mld_polyveck *h) { - unsigned int i; + int i; mld_assert_bound_2d(u->vec, MLDSA_K, MLDSA_N, 0, MLDSA_Q); mld_assert_bound_2d(h->vec, MLDSA_K, MLDSA_N, 0, 2); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(w, sizeof(mld_polyveck))) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) invariant(forall(k2, 0, i, array_bound(w->vec[k2].coeffs, 0, MLDSA_N, 0, (MLDSA_Q - 1) / (2 * MLDSA_GAMMA2)))) @@ -743,14 +744,14 @@ MLD_INTERNAL_API void mld_polyveck_pack_w1(uint8_t r[MLDSA_K * MLDSA_POLYW1_PACKEDBYTES], const mld_polyveck *w1) { - unsigned int i; + int i; mld_assert_bound_2d(w1->vec, MLDSA_K, MLDSA_N, 0, (MLDSA_Q - 1) / (2 * MLDSA_GAMMA2)); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(r, MLDSA_K * MLDSA_POLYW1_PACKEDBYTES)) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) ) { mld_polyw1_pack(&r[i * MLDSA_POLYW1_PACKEDBYTES], &w1->vec[i]); @@ -761,12 +762,12 @@ MLD_INTERNAL_API void mld_polyveck_pack_eta(uint8_t r[MLDSA_K * MLDSA_POLYETA_PACKEDBYTES], const mld_polyveck *p) { - unsigned int i; + int i; mld_assert_abs_bound_2d(p->vec, MLDSA_K, 
MLDSA_N, MLDSA_ETA + 1); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(r, MLDSA_K * MLDSA_POLYETA_PACKEDBYTES)) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) ) { mld_polyeta_pack(&r[i * MLDSA_POLYETA_PACKEDBYTES], &p->vec[i]); @@ -777,12 +778,12 @@ MLD_INTERNAL_API void mld_polyvecl_pack_eta(uint8_t r[MLDSA_L * MLDSA_POLYETA_PACKEDBYTES], const mld_polyvecl *p) { - unsigned int i; + int i; mld_assert_abs_bound_2d(p->vec, MLDSA_L, MLDSA_N, MLDSA_ETA + 1); for (i = 0; i < MLDSA_L; ++i) __loop__( assigns(i, memory_slice(r, MLDSA_L * MLDSA_POLYETA_PACKEDBYTES)) - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) ) { mld_polyeta_pack(&r[i * MLDSA_POLYETA_PACKEDBYTES], &p->vec[i]); @@ -793,13 +794,13 @@ MLD_INTERNAL_API void mld_polyvecl_pack_z(uint8_t r[MLDSA_L * MLDSA_POLYZ_PACKEDBYTES], const mld_polyvecl *p) { - unsigned int i; + int i; mld_assert_bound_2d(p->vec, MLDSA_L, MLDSA_N, -(MLDSA_GAMMA1 - 1), MLDSA_GAMMA1 + 1); for (i = 0; i < MLDSA_L; ++i) __loop__( assigns(i, memory_slice(r, MLDSA_L * MLDSA_POLYZ_PACKEDBYTES)) - invariant(i <= MLDSA_L) + invariant(i >= 0 && i <= MLDSA_L) ) { mld_polyz_pack(&r[i * MLDSA_POLYZ_PACKEDBYTES], &p->vec[i]); @@ -810,13 +811,13 @@ MLD_INTERNAL_API void mld_polyveck_pack_t0(uint8_t r[MLDSA_K * MLDSA_POLYT0_PACKEDBYTES], const mld_polyveck *p) { - unsigned int i; + int i; mld_assert_bound_2d(p->vec, MLDSA_K, MLDSA_N, -(1 << (MLDSA_D - 1)) + 1, (1 << (MLDSA_D - 1)) + 1); for (i = 0; i < MLDSA_K; ++i) __loop__( assigns(i, memory_slice(r, MLDSA_K * MLDSA_POLYT0_PACKEDBYTES)) - invariant(i <= MLDSA_K) + invariant(i >= 0 && i <= MLDSA_K) ) { mld_polyt0_pack(&r[i * MLDSA_POLYT0_PACKEDBYTES], &p->vec[i]); @@ -827,7 +828,7 @@ MLD_INTERNAL_API void mld_polyvecl_unpack_eta( mld_polyvecl *p, const uint8_t r[MLDSA_L * MLDSA_POLYETA_PACKEDBYTES]) { - unsigned int i; + int i; for (i = 0; i < MLDSA_L; ++i) { mld_polyeta_unpack(&p->vec[i], r + i * MLDSA_POLYETA_PACKEDBYTES); @@ -841,7 +842,7 @@ 
MLD_INTERNAL_API void mld_polyvecl_unpack_z(mld_polyvecl *z, const uint8_t r[MLDSA_L * MLDSA_POLYZ_PACKEDBYTES]) { - unsigned int i; + int i; for (i = 0; i < MLDSA_L; ++i) { mld_polyz_unpack(&z->vec[i], r + i * MLDSA_POLYZ_PACKEDBYTES); @@ -855,7 +856,7 @@ MLD_INTERNAL_API void mld_polyveck_unpack_eta( mld_polyveck *p, const uint8_t r[MLDSA_K * MLDSA_POLYETA_PACKEDBYTES]) { - unsigned int i; + int i; for (i = 0; i < MLDSA_K; ++i) { mld_polyeta_unpack(&p->vec[i], r + i * MLDSA_POLYETA_PACKEDBYTES); @@ -869,7 +870,7 @@ MLD_INTERNAL_API void mld_polyveck_unpack_t0(mld_polyveck *p, const uint8_t r[MLDSA_K * MLDSA_POLYT0_PACKEDBYTES]) { - unsigned int i; + int i; for (i = 0; i < MLDSA_K; ++i) { mld_polyt0_unpack(&p->vec[i], r + i * MLDSA_POLYT0_PACKEDBYTES); diff --git a/mldsa/src/polyvec.h b/mldsa/src/polyvec.h index bb1ed8fba..6f3ff6f35 100644 --- a/mldsa/src/polyvec.h +++ b/mldsa/src/polyvec.h @@ -508,14 +508,14 @@ __contract__( * Returns number of 1 bits. **************************************************/ MLD_INTERNAL_API -unsigned int mld_polyveck_make_hint(mld_polyveck *h, const mld_polyveck *v0, - const mld_polyveck *v1) +int mld_polyveck_make_hint(mld_polyveck *h, const mld_polyveck *v0, + const mld_polyveck *v1) __contract__( requires(memory_no_alias(h, sizeof(mld_polyveck))) requires(memory_no_alias(v0, sizeof(mld_polyveck))) requires(memory_no_alias(v1, sizeof(mld_polyveck))) assigns(memory_slice(h, sizeof(mld_polyveck))) - ensures(return_value <= MLDSA_N * MLDSA_K) + ensures(return_value >= 0 && return_value <= MLDSA_N * MLDSA_K) ensures(forall(k1, 0, MLDSA_K, array_bound(h->vec[k1].coeffs, 0, MLDSA_N, 0, 2))) ); diff --git a/mldsa/src/rounding.h b/mldsa/src/rounding.h index a83562b0f..c04ded2c7 100644 --- a/mldsa/src/rounding.h +++ b/mldsa/src/rounding.h @@ -157,7 +157,7 @@ __contract__( * * Returns 1 if overflow, 0 otherwise **************************************************/ -static MLD_INLINE unsigned int mld_make_hint(int32_t a0, int32_t a1) +static 
MLD_INLINE int mld_make_hint(int32_t a0, int32_t a1) __contract__( ensures(return_value >= 0 && return_value <= 1) ) diff --git a/mldsa/src/sign.c b/mldsa/src/sign.c index 86e318fb6..9a21c1be7 100644 --- a/mldsa/src/sign.c +++ b/mldsa/src/sign.c @@ -445,7 +445,7 @@ __contract__( ) { MLD_ALIGN uint8_t challenge_bytes[MLDSA_CTILDEBYTES]; - unsigned int n; + int n; mld_polyvecl y, z; mld_polyveck w, w1, w0, h; mld_poly cp; @@ -782,7 +782,7 @@ int crypto_sign_verify_internal(const uint8_t *sig, size_t siglen, const uint8_t pk[CRYPTO_PUBLICKEYBYTES], int externalmu) { - unsigned int i; + int i; int res; MLD_ALIGN uint8_t buf[MLDSA_K * MLDSA_POLYW1_PACKEDBYTES]; MLD_ALIGN uint8_t rho[MLDSA_SEEDBYTES]; @@ -864,7 +864,7 @@ int crypto_sign_verify_internal(const uint8_t *sig, size_t siglen, MLD_CT_TESTING_DECLASSIFY(c2, sizeof(c2)); for (i = 0; i < MLDSA_CTILDEBYTES; ++i) __loop__( - invariant(i <= MLDSA_CTILDEBYTES) + invariant(i >= 0 && i <= MLDSA_CTILDEBYTES) ) { if (c[i] != c2[i]) diff --git a/proofs/cbmc/invntt_layer/Makefile b/proofs/cbmc/invntt_layer/Makefile index 7480bf536..8b1a3c5ba 100644 --- a/proofs/cbmc/invntt_layer/Makefile +++ b/proofs/cbmc/invntt_layer/Makefile @@ -26,7 +26,8 @@ USE_DYNAMIC_FRAMES=1 # Disable any setting of EXTERNAL_SAT_SOLVER, and choose SMT backend instead EXTERNAL_SAT_SOLVER= -CBMCFLAGS=--smt2 +# Bitwuzla has been found to be more efficient on this function +CBMCFLAGS=--bitwuzla --slice-formula FUNCTION_NAME = mld_invntt_layer diff --git a/proofs/cbmc/invntt_layer/invntt_layer_harness.c b/proofs/cbmc/invntt_layer/invntt_layer_harness.c index 4f8fb4ce9..32d525724 100644 --- a/proofs/cbmc/invntt_layer/invntt_layer_harness.c +++ b/proofs/cbmc/invntt_layer/invntt_layer_harness.c @@ -4,11 +4,11 @@ #include #include "params.h" -void mld_invntt_layer(int32_t r[MLDSA_N], unsigned layer); +void mld_invntt_layer(int32_t r[MLDSA_N], int layer); void harness(void) { int32_t *r; - unsigned layer; + int layer; mld_invntt_layer(r, layer); } diff --git 
a/proofs/cbmc/keccak_absorb/keccak_absorb_harness.c b/proofs/cbmc/keccak_absorb/keccak_absorb_harness.c index 0385ea4c1..6211bc840 100644 --- a/proofs/cbmc/keccak_absorb/keccak_absorb_harness.c +++ b/proofs/cbmc/keccak_absorb/keccak_absorb_harness.c @@ -3,18 +3,16 @@ #include "fips202/fips202.h" -extern unsigned int keccak_absorb(uint64_t s[MLD_KECCAK_LANES], - unsigned int pos, unsigned int r, - const uint8_t *in, size_t inlen); +extern int keccak_absorb(uint64_t s[MLD_KECCAK_LANES], int pos, int r, + const uint8_t *in, size_t inlen); void harness(void) { uint64_t *s; - unsigned int pos; - const unsigned int r; + int pos, r, r2; const uint8_t *in; size_t inlen; uint8_t p; - keccak_absorb(s, pos, r, in, inlen); + r2 = keccak_absorb(s, pos, r, in, inlen); } diff --git a/proofs/cbmc/keccak_absorb_once_x4/keccak_absorb_once_x4_harness.c b/proofs/cbmc/keccak_absorb_once_x4/keccak_absorb_once_x4_harness.c index b6e606de1..ab3678170 100644 --- a/proofs/cbmc/keccak_absorb_once_x4/keccak_absorb_once_x4_harness.c +++ b/proofs/cbmc/keccak_absorb_once_x4/keccak_absorb_once_x4_harness.c @@ -9,14 +9,14 @@ #include -void mld_keccak_absorb_once_x4(uint64_t *s, uint32_t r, const uint8_t *in0, +void mld_keccak_absorb_once_x4(uint64_t *s, int r, const uint8_t *in0, const uint8_t *in1, const uint8_t *in2, const uint8_t *in3, size_t inlen, uint8_t p); void harness(void) { uint64_t *s; - uint32_t r; + int r; const uint8_t *in0, *in1, *in2, *in3; size_t inlen; uint8_t p; diff --git a/proofs/cbmc/keccak_finalize/keccak_finalize_harness.c b/proofs/cbmc/keccak_finalize/keccak_finalize_harness.c index ad33a1738..4a18a927b 100644 --- a/proofs/cbmc/keccak_finalize/keccak_finalize_harness.c +++ b/proofs/cbmc/keccak_finalize/keccak_finalize_harness.c @@ -3,13 +3,13 @@ #include "fips202/fips202.h" -extern void keccak_finalize(uint64_t s[MLD_KECCAK_LANES], unsigned int pos, - unsigned int r, uint8_t p); +extern void keccak_finalize(uint64_t s[MLD_KECCAK_LANES], int pos, int r, + uint8_t p); void 
harness(void) { uint64_t *s; - unsigned int pos, r; + int pos, r; uint8_t p; keccak_finalize(s, pos, r, p); diff --git a/proofs/cbmc/keccak_squeeze/keccak_squeeze_harness.c b/proofs/cbmc/keccak_squeeze/keccak_squeeze_harness.c index 1c25ae10a..a3a17919e 100644 --- a/proofs/cbmc/keccak_squeeze/keccak_squeeze_harness.c +++ b/proofs/cbmc/keccak_squeeze/keccak_squeeze_harness.c @@ -3,16 +3,15 @@ #include "fips202/fips202.h" -static unsigned int keccak_squeeze(uint8_t *out, size_t outlen, - uint64_t s[MLD_KECCAK_LANES], - unsigned int pos, unsigned int r); +static int keccak_squeeze(uint8_t *out, size_t outlen, + uint64_t s[MLD_KECCAK_LANES], int pos, int r); void harness(void) { uint8_t *out; size_t outlen; uint64_t *s; - unsigned int pos, r; + int pos, r, r2; - keccak_squeeze(out, outlen, s, pos, r); + r2 = keccak_squeeze(out, outlen, s, pos, r); } diff --git a/proofs/cbmc/keccak_squeezeblocks_x4/keccak_squeezeblocks_x4_harness.c b/proofs/cbmc/keccak_squeezeblocks_x4/keccak_squeezeblocks_x4_harness.c index 09af465f1..f21cabd79 100644 --- a/proofs/cbmc/keccak_squeezeblocks_x4/keccak_squeezeblocks_x4_harness.c +++ b/proofs/cbmc/keccak_squeezeblocks_x4/keccak_squeezeblocks_x4_harness.c @@ -11,13 +11,13 @@ void mld_keccak_squeezeblocks_x4(uint8_t *out0, uint8_t *out1, uint8_t *out2, uint8_t *out3, size_t nblocks, uint64_t *s, - uint32_t r); + int r); void harness(void) { - uint8_t *out0, out1, out2, out3; + uint8_t *out0, *out1, *out2, *out3; size_t nblocks; uint64_t *s; - uint32_t r; + int r; mld_keccak_squeezeblocks_x4(out0, out1, out2, out3, nblocks, s, r); } diff --git a/proofs/cbmc/keccakf1600_extract_bytes/keccakf1600_extract_bytes_harness.c b/proofs/cbmc/keccakf1600_extract_bytes/keccakf1600_extract_bytes_harness.c index 9a2c745ef..d467d9af4 100644 --- a/proofs/cbmc/keccakf1600_extract_bytes/keccakf1600_extract_bytes_harness.c +++ b/proofs/cbmc/keccakf1600_extract_bytes/keccakf1600_extract_bytes_harness.c @@ -9,7 +9,7 @@ void harness(void) { uint64_t *state; unsigned char *data; - unsigned offset; - 
unsigned length; + int offset; + int length; mld_keccakf1600_extract_bytes(state, data, offset, length); } diff --git a/proofs/cbmc/keccakf1600_extract_bytes_BE/keccakf1600_extract_bytes_be_harness.c b/proofs/cbmc/keccakf1600_extract_bytes_BE/keccakf1600_extract_bytes_be_harness.c index 9a2c745ef..d467d9af4 100644 --- a/proofs/cbmc/keccakf1600_extract_bytes_BE/keccakf1600_extract_bytes_be_harness.c +++ b/proofs/cbmc/keccakf1600_extract_bytes_BE/keccakf1600_extract_bytes_be_harness.c @@ -9,7 +9,7 @@ void harness(void) { uint64_t *state; unsigned char *data; - unsigned offset; - unsigned length; + int offset; + int length; mld_keccakf1600_extract_bytes(state, data, offset, length); } diff --git a/proofs/cbmc/keccakf1600_xor_bytes/keccakf1600_xor_bytes_harness.c b/proofs/cbmc/keccakf1600_xor_bytes/keccakf1600_xor_bytes_harness.c index 977f571b5..35e118740 100644 --- a/proofs/cbmc/keccakf1600_xor_bytes/keccakf1600_xor_bytes_harness.c +++ b/proofs/cbmc/keccakf1600_xor_bytes/keccakf1600_xor_bytes_harness.c @@ -9,7 +9,7 @@ void harness(void) { uint64_t *state; const unsigned char *data; - unsigned offset; - unsigned length; + int offset; + int length; mld_keccakf1600_xor_bytes(state, data, offset, length); } diff --git a/proofs/cbmc/keccakf1600_xor_bytes_BE/keccakf1600_xor_bytes_be_harness.c b/proofs/cbmc/keccakf1600_xor_bytes_BE/keccakf1600_xor_bytes_be_harness.c index 977f571b5..35e118740 100644 --- a/proofs/cbmc/keccakf1600_xor_bytes_BE/keccakf1600_xor_bytes_be_harness.c +++ b/proofs/cbmc/keccakf1600_xor_bytes_BE/keccakf1600_xor_bytes_be_harness.c @@ -9,7 +9,7 @@ void harness(void) { uint64_t *state; const unsigned char *data; - unsigned offset; - unsigned length; + int offset; + int length; mld_keccakf1600_xor_bytes(state, data, offset, length); } diff --git a/proofs/cbmc/keccakf1600x4_extract_bytes/keccakf1600x4_extract_bytes_harness.c b/proofs/cbmc/keccakf1600x4_extract_bytes/keccakf1600x4_extract_bytes_harness.c index a0791ca53..fbaa93fa3 100644 --- 
a/proofs/cbmc/keccakf1600x4_extract_bytes/keccakf1600x4_extract_bytes_harness.c +++ b/proofs/cbmc/keccakf1600x4_extract_bytes/keccakf1600x4_extract_bytes_harness.c @@ -9,8 +9,8 @@ void harness(void) { uint64_t *state; unsigned char *data0, *data1, *data2, *data3; - unsigned offset; - unsigned length; + int offset; + int length; mld_keccakf1600x4_extract_bytes(state, data0, data1, data2, data3, offset, length); } diff --git a/proofs/cbmc/keccakf1600x4_xor_bytes/keccakf1600x4_xor_bytes_harness.c b/proofs/cbmc/keccakf1600x4_xor_bytes/keccakf1600x4_xor_bytes_harness.c index e6e88a490..0e449e0b7 100644 --- a/proofs/cbmc/keccakf1600x4_xor_bytes/keccakf1600x4_xor_bytes_harness.c +++ b/proofs/cbmc/keccakf1600x4_xor_bytes/keccakf1600x4_xor_bytes_harness.c @@ -9,8 +9,8 @@ void harness(void) { uint64_t *state; const unsigned char *data0, *data1, *data2, *data3; - unsigned offset; - unsigned length; + int offset; + int length; mld_keccakf1600x4_xor_bytes(state, data0, data1, data2, data3, offset, length); } diff --git a/proofs/cbmc/ntt_butterfly_block/ntt_butterfly_block_harness.c b/proofs/cbmc/ntt_butterfly_block/ntt_butterfly_block_harness.c index fb4ffa2bb..ab0556855 100644 --- a/proofs/cbmc/ntt_butterfly_block/ntt_butterfly_block_harness.c +++ b/proofs/cbmc/ntt_butterfly_block/ntt_butterfly_block_harness.c @@ -4,13 +4,13 @@ #include #include "params.h" -void mld_ntt_butterfly_block(int32_t r[MLDSA_N], int32_t zeta, unsigned start, - unsigned len, unsigned bound); +void mld_ntt_butterfly_block(int32_t r[MLDSA_N], int32_t zeta, const int start, + const int len, const int bound); void harness(void) { int32_t *r, zeta; - unsigned start, len; - unsigned bound; + int start, len; + int bound; mld_ntt_butterfly_block(r, zeta, start, len, bound); } diff --git a/proofs/cbmc/ntt_layer/ntt_layer_harness.c b/proofs/cbmc/ntt_layer/ntt_layer_harness.c index e07f526f0..22ca927bd 100644 --- a/proofs/cbmc/ntt_layer/ntt_layer_harness.c +++ b/proofs/cbmc/ntt_layer/ntt_layer_harness.c @@ -4,11 
+4,11 @@ #include #include "params.h" -void mld_ntt_layer(int32_t r[MLDSA_N], unsigned layer); +void mld_ntt_layer(int32_t r[MLDSA_N], const int layer); void harness(void) { int32_t *r; - unsigned layer; + int layer; mld_ntt_layer(r, layer); } diff --git a/proofs/cbmc/pack_sig/pack_sig_harness.c b/proofs/cbmc/pack_sig/pack_sig_harness.c index 40d7aecf7..4bfb26c75 100644 --- a/proofs/cbmc/pack_sig/pack_sig_harness.c +++ b/proofs/cbmc/pack_sig/pack_sig_harness.c @@ -9,6 +9,6 @@ void harness(void) uint8_t *a, *b; mld_polyveck *h; mld_polyvecl *z; - unsigned int nh; + int nh; mld_pack_sig(a, b, z, h, nh); } diff --git a/proofs/cbmc/polyvecl_add/Makefile b/proofs/cbmc/polyvecl_add/Makefile index 38de96cdd..e04857161 100644 --- a/proofs/cbmc/polyvecl_add/Makefile +++ b/proofs/cbmc/polyvecl_add/Makefile @@ -26,7 +26,7 @@ USE_DYNAMIC_FRAMES=1 # Disable any setting of EXTERNAL_SAT_SOLVER, and choose SMT backend instead EXTERNAL_SAT_SOLVER= -CBMCFLAGS=--smt2 --arrays-uf-always --slice-formula +CBMCFLAGS=--smt2 --slice-formula FUNCTION_NAME = polyvecl_add diff --git a/proofs/cbmc/rej_eta/rej_eta_harness.c b/proofs/cbmc/rej_eta/rej_eta_harness.c index b540b7bf9..a582bf2da 100644 --- a/proofs/cbmc/rej_eta/rej_eta_harness.c +++ b/proofs/cbmc/rej_eta/rej_eta_harness.c @@ -3,17 +3,14 @@ #include "poly.h" -static unsigned int mld_rej_eta(int32_t *a, unsigned int target, - unsigned int offset, const uint8_t *buf, - unsigned int buflen); +static int mld_rej_eta(int32_t *a, int target, int offset, const uint8_t *buf, + int buflen); void harness(void) { int32_t *a; - unsigned int target; - unsigned int offset; + int target, offset, buflen, r; const uint8_t *buf; - unsigned int buflen; - mld_rej_eta(a, target, offset, buf, buflen); + r = mld_rej_eta(a, target, offset, buf, buflen); } diff --git a/proofs/cbmc/rej_eta_c/rej_eta_c_harness.c b/proofs/cbmc/rej_eta_c/rej_eta_c_harness.c index 7355bc6c2..b44a004c1 100644 --- a/proofs/cbmc/rej_eta_c/rej_eta_c_harness.c +++ 
b/proofs/cbmc/rej_eta_c/rej_eta_c_harness.c @@ -4,17 +4,16 @@ #include "poly.h" #define mld_rej_eta_c MLD_ADD_PARAM_SET(mld_rej_eta_c) -static unsigned int mld_rej_eta_c(int32_t *a, unsigned int target, - unsigned int offset, const uint8_t *buf, - unsigned int buflen); +static int mld_rej_eta_c(int32_t *a, int target, int offset, const uint8_t *buf, + int buflen); void harness(void) { int32_t *a; - unsigned int target; - unsigned int offset; + int target; + int offset; const uint8_t *buf; - unsigned int buflen; + int buflen; mld_rej_eta_c(a, target, offset, buf, buflen); } diff --git a/proofs/cbmc/rej_uniform/rej_uniform_harness.c b/proofs/cbmc/rej_uniform/rej_uniform_harness.c index e4b909748..72fea262b 100644 --- a/proofs/cbmc/rej_uniform/rej_uniform_harness.c +++ b/proofs/cbmc/rej_uniform/rej_uniform_harness.c @@ -3,17 +3,14 @@ #include "poly.h" -static unsigned int mld_rej_uniform(int32_t *a, unsigned int target, - unsigned int offset, const uint8_t *buf, - unsigned int buflen); +static int mld_rej_uniform(int32_t *a, int target, int offset, + const uint8_t *buf, int buflen); void harness(void) { int32_t *a; - unsigned int target; - unsigned int offset; + int target, offset, buflen, r; const uint8_t *buf; - unsigned int buflen; - mld_rej_uniform(a, target, offset, buf, buflen); + r = mld_rej_uniform(a, target, offset, buf, buflen); } diff --git a/proofs/cbmc/rej_uniform_c/rej_uniform_c_harness.c b/proofs/cbmc/rej_uniform_c/rej_uniform_c_harness.c index c366b8e48..4c2f2a025 100644 --- a/proofs/cbmc/rej_uniform_c/rej_uniform_c_harness.c +++ b/proofs/cbmc/rej_uniform_c/rej_uniform_c_harness.c @@ -3,17 +3,16 @@ #include "poly.h" -static unsigned int mld_rej_uniform_c(int32_t *a, unsigned int target, - unsigned int offset, const uint8_t *buf, - unsigned int buflen); +static int mld_rej_uniform_c(int32_t *a, int target, int offset, + const uint8_t *buf, int buflen); void harness(void) { int32_t *a; - unsigned int target; - unsigned int offset; + int target; + int 
offset; const uint8_t *buf; - unsigned int buflen; + int buflen; mld_rej_uniform_c(a, target, offset, buf, buflen); }