quickjs-tart

quickjs-based runtime for wallet-core logic

constant_time.c (8856B)


/**
 *  Constant-time functions
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

/*
 * The following functions are implemented without using comparison operators, as those
 * might be translated to branches by some compilers on some platforms.
 */

#include <stdint.h>
#include <limits.h>

#include "common.h"
#include "constant_time_internal.h"
#include "mbedtls/constant_time.h"
#include "mbedtls/error.h"
#include "mbedtls/platform_util.h"

#include <string.h>

#if !defined(MBEDTLS_CT_ASM)
/*
 * Define an object with the value zero, such that the compiler cannot prove that it
 * has the value zero (because it is volatile, it "may be modified in ways unknown to
 * the implementation").
 */
volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0;
#endif
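
/*
 * Illustrative sketch, not part of the original file: how a volatile zero such
 * as the one above can hide a value's provenance from the optimiser. Because
 * mbedtls_ct_zero is volatile, the compiler must re-read it and cannot assume
 * the XOR is a no-op, so it can neither constant-fold the result nor
 * reintroduce a branch on x. The helper name is hypothetical; this file itself
 * relies on mbed TLS's own mbedtls_ct_compiler_opaque() for the same purpose
 * further down.
 */
#if !defined(MBEDTLS_CT_ASM)
static inline mbedtls_ct_uint_t example_opaque(mbedtls_ct_uint_t x)
{
    return x ^ mbedtls_ct_zero;   /* same value, but opaque to the compiler */
}
#endif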

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
 * perform fast unaligned access to volatile data.
 *
 * This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile
 * memory accesses.
 *
 * Some of these definitions could be moved into alignment.h but for now they are
 * only used here.
 */
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \
    ((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \
    defined(MBEDTLS_CT_AARCH64_ASM))
/* We check pointer sizes to avoid issues with them not matching register size requirements */
#define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS

static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(MBEDTLS_CT_ARM_ASM)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(MBEDTLS_CT_AARCH64_ASM)
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);
#else
#error "No assembly defined for mbedtls_get_unaligned_volatile_uint32"
#endif
    return r;
}
#endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) &&
          (defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */
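
/*
 * Illustrative sketch, not part of the original file: without inline assembly,
 * the only well-defined way to read volatile bytes is one at a time. For the
 * XOR accumulation in mbedtls_ct_memcmp() below, byte order does not matter,
 * so a portable (slower) fallback could look like this. The function name is
 * hypothetical.
 */
static inline uint32_t example_volatile_uint32_bytewise(volatile const unsigned char *p)
{
    /* Four separate volatile byte reads that the compiler may not merge or elide. */
    return (uint32_t) p[0] |
           ((uint32_t) p[1] << 8) |
           ((uint32_t) p[2] << 16) |
           ((uint32_t) p[3] << 24);
}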

int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }


#if (INT_MAX < INT32_MAX)
    /* We don't support int smaller than 32-bits, but if someone tried to build
     * with this configuration, there is a risk that, for differing data, the
     * only bits set in diff are in the top 16-bits, and would be lost by a
     * simple cast from uint32 to int.
     * This would have significant security implications, so protect against it. */
#error "mbedtls_ct_memcmp() requires minimum 32-bit ints"
#else
    /* The bit-twiddling ensures that when we cast uint32_t to int, we are casting
     * a value that is in the range 0..INT_MAX - a value larger than this would
     * result in implementation defined behaviour.
     *
     * This ensures that the value returned by the function is non-zero iff
     * diff is non-zero.
     */
    return (int) ((diff & 0xffff) | (diff >> 16));
#endif
}
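
/*
 * Usage sketch, not part of the original file: constant-time comparison of two
 * authentication tags. Unlike memcmp(), the time taken depends only on the
 * length, not on the position of the first differing byte, and only a
 * zero/non-zero result is returned. The helper and buffer names are hypothetical.
 */
static inline int example_tags_differ(const unsigned char computed_tag[16],
                                      const unsigned char received_tag[16])
{
    /* Returns 0 iff the two 16-byte tags are identical. */
    return mbedtls_ct_memcmp(computed_tag, received_tag, 16);
}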

#if defined(MBEDTLS_NIST_KW_C)

int mbedtls_ct_memcmp_partial(const void *a,
                              const void *b,
                              size_t n,
                              size_t skip_head,
                              size_t skip_tail)
{
    unsigned int diff = 0;

    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;

    size_t valid_end = n - skip_tail;

    for (size_t i = 0; i < n; i++) {
        unsigned char x = A[i], y = B[i];
        unsigned int d = x ^ y;
        mbedtls_ct_condition_t valid = mbedtls_ct_bool_and(mbedtls_ct_uint_ge(i, skip_head),
                                                           mbedtls_ct_uint_lt(i, valid_end));
        diff |= mbedtls_ct_uint_if_else_0(valid, d);
    }

    /* Since we go byte-by-byte, the only bits set will be in the bottom 8 bits, so the
     * cast from uint to int is safe. */
    return (int) diff;
}

#endif
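
/*
 * Usage sketch, not part of the original file: compare only the middle of two
 * equally sized buffers, with the skipped head and tail bytes still read but
 * excluded from the result, so timing depends only on n. Names are
 * hypothetical; the real caller of this helper is the NIST key wrapping
 * (KW/KWP) unwrap path, hence the MBEDTLS_NIST_KW_C guard.
 */
#if defined(MBEDTLS_NIST_KW_C)
static inline int example_compare_body(const unsigned char *a,
                                       const unsigned char *b,
                                       size_t len)
{
    /* Zero iff a and b agree on bytes [8, len - 4); the first 8 and last 4 bytes are ignored. */
    return mbedtls_ct_memcmp_partial(a, b, len, 8, 4);
}
#endif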

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)
{
    volatile unsigned char *buf = start;
    for (size_t i = 0; i < total; i++) {
        mbedtls_ct_condition_t no_op = mbedtls_ct_uint_gt(total - offset, i);
        /* The first `total - offset` passes are a no-op. The last
         * `offset` passes shift the data one byte to the left and
         * zero out the last byte. */
        for (size_t n = 0; n < total - 1; n++) {
            unsigned char current = buf[n];
            unsigned char next    = buf[n+1];
            buf[n] = mbedtls_ct_uint_if(no_op, current, next);
        }
        buf[total-1] = mbedtls_ct_uint_if_else_0(no_op, buf[total-1]);
    }
}

#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */
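
/*
 * Worked illustration, not part of the original file: with total = 5 and
 * offset = 2, the outer loop makes 5 passes; the first 3 are no-ops and the
 * last 2 each shift the buffer left by one byte and zero the tail, so
 * "ABCDE" becomes "CDE\0\0". Every pass touches every byte, so execution time
 * depends only on total, not on offset (which is secret-dependent in the
 * PKCS#1 v1.5 unpadding caller).
 */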

void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,
                          unsigned char *dest,
                          const unsigned char *src1,
                          const unsigned char *src2,
                          size_t len)
{
#if defined(MBEDTLS_CT_SIZE_64)
    const uint64_t mask     = (uint64_t) condition;
    const uint64_t not_mask = (uint64_t) ~mbedtls_ct_compiler_opaque(condition);
#else
    const uint32_t mask     = (uint32_t) condition;
    const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);
#endif

    /* If src2 is NULL, setup src2 so that we read from the destination address.
     *
     * This means that if src2 == NULL && condition is false, the result will be a
     * no-op because we read from dest and write the same data back into dest.
     */
    if (src2 == NULL) {
        src2 = dest;
    }
    /* dest[i] = condition ? src1[i] : src2[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
#if defined(MBEDTLS_CT_SIZE_64)
    for (; (i + 8) <= len; i += 8) {
        uint64_t a = mbedtls_get_unaligned_uint64(src1 + i) & mask;
        uint64_t b = mbedtls_get_unaligned_uint64(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint64(dest + i, a | b);
    }
#else
    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;
        uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#endif /* defined(MBEDTLS_CT_SIZE_64) */
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */
    for (; i < len; i++) {
        dest[i] = (src1[i] & mask) | (src2[i] & not_mask);
    }
}
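
/*
 * Usage sketch, not part of the original file: select one of two equally sized
 * buffers without branching on a secret condition. mbedtls_ct_condition_t is an
 * all-ones/all-zeros mask, which is why the plain casts above yield valid byte
 * masks. The helper and parameter names are hypothetical.
 */
static inline void example_select_key(unsigned char *out,
                                      const unsigned char *real_key,
                                      const unsigned char *dummy_key,
                                      size_t len,
                                      mbedtls_ct_condition_t use_real)
{
    /* out = use_real ? real_key : dummy_key, in time independent of use_real. */
    mbedtls_ct_memcpy_if(use_real, out, real_key, dummy_key, len);
}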

void mbedtls_ct_memcpy_offset(unsigned char *dest,
                              const unsigned char *src,
                              size_t offset,
                              size_t offset_min,
                              size_t offset_max,
                              size_t len)
{
    size_t offsetval;

    for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {
        mbedtls_ct_memcpy_if(mbedtls_ct_uint_eq(offsetval, offset), dest, src + offsetval, NULL,
                             len);
    }
}
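
/*
 * Usage sketch, not part of the original file: copy len bytes that start at a
 * secret offset inside a record. Every candidate offset in [offset_min,
 * offset_max] is visited and exactly one copy takes effect, so timing depends
 * only on the public bounds and length, never on the secret offset. Names are
 * hypothetical; the classic application is extracting the MAC from a CBC-mode
 * TLS record.
 */
static inline void example_extract_mac(unsigned char *mac_out,
                                       const unsigned char *record,
                                       size_t secret_offset,
                                       size_t min_offset,
                                       size_t max_offset,
                                       size_t mac_len)
{
    mbedtls_ct_memcpy_offset(mac_out, record, secret_offset,
                             min_offset, max_offset, mac_len);
}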

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)
{
    uint32_t mask = (uint32_t) ~condition;
    uint8_t *p = (uint8_t *) buf;
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
    for (; (i + 4) <= len; i += 4) {
        mbedtls_put_unaligned_uint32((void *) (p + i),
                                     mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);
    }
#endif
    for (; i < len; i++) {
        p[i] = p[i] & mask;
    }
}

#endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */
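
/*
 * Usage sketch, not part of the original file: wipe a buffer only when a secret
 * condition holds, e.g. discarding a decrypted PKCS#1 v1.5 payload when the
 * padding check failed, without a data-dependent branch. Names are hypothetical;
 * the guard matches the one around mbedtls_ct_zeroize_if() above.
 */
#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
static inline void example_discard_on_error(mbedtls_ct_condition_t bad_padding,
                                            unsigned char *plaintext,
                                            size_t len)
{
    /* plaintext is zeroed iff bad_padding is true (all-ones); otherwise it is unchanged. */
    mbedtls_ct_zeroize_if(bad_padding, plaintext, len);
}
#endif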