author    Sam Roberts <vieuxtech@gmail.com>  2018-11-22 10:39:20 -0800
committer Sam Roberts <vieuxtech@gmail.com>  2019-01-22 13:32:34 -0800
commit    4231ad04f0b2aee5bda6be94715d4b70badaac8b (patch)
tree      19f189fae6828708ebd37e466ce4a7716494b96a /deps/openssl/openssl/crypto/evp/e_aes.c
parent    5d80f9ea6091847176fa47fb1395fdffc4af9164 (diff)
deps: upgrade openssl sources to 1.1.1a
This updates all sources in deps/openssl/openssl with openssl-1.1.1a.

PR-URL: https://github.com/nodejs/node/pull/25381
Reviewed-By: Daniel Bevenius <daniel.bevenius@gmail.com>
Reviewed-By: Shigeki Ohtsu <ohtsu@ohtsu.org>
Diffstat (limited to 'deps/openssl/openssl/crypto/evp/e_aes.c')
-rw-r--r--  deps/openssl/openssl/crypto/evp/e_aes.c | 1641
1 file changed, 1573 insertions(+), 68 deletions(-)
diff --git a/deps/openssl/openssl/crypto/evp/e_aes.c b/deps/openssl/openssl/crypto/evp/e_aes.c
index 3f36d7072d..39eb4f379a 100644
--- a/deps/openssl/openssl/crypto/evp/e_aes.c
+++ b/deps/openssl/openssl/crypto/evp/e_aes.c
@@ -136,14 +136,30 @@ void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
const unsigned char ivec[AES_BLOCK_SIZE]);
#endif
#ifdef AES_XTS_ASM
-void AES_xts_encrypt(const char *inp, char *out, size_t len,
+void AES_xts_encrypt(const unsigned char *inp, unsigned char *out, size_t len,
const AES_KEY *key1, const AES_KEY *key2,
const unsigned char iv[16]);
-void AES_xts_decrypt(const char *inp, char *out, size_t len,
+void AES_xts_decrypt(const unsigned char *inp, unsigned char *out, size_t len,
const AES_KEY *key1, const AES_KEY *key2,
const unsigned char iv[16]);
#endif
+/* increment counter (64-bit int) by 1 */
+static void ctr64_inc(unsigned char *counter)
+{
+ int n = 8;
+ unsigned char c;
+
+ do {
+ --n;
+ c = counter[n];
+ ++c;
+ counter[n] = c;
+ if (c)
+ return;
+ } while (n);
+}
+
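Illustration (not part of the diff): ctr64_inc treats its argument as a big-endian 64-bit integer, incrementing from the low byte and stopping as soon as a byte does not wrap to zero. A minimal self-contained check of the carry propagation:

    #include <assert.h>

    static void ctr64_inc(unsigned char *counter) /* as in the hunk above */
    {
        int n = 8;
        unsigned char c;

        do {
            --n;
            c = counter[n];
            ++c;
            counter[n] = c;
            if (c)
                return;
        } while (n);
    }

    int main(void)
    {
        unsigned char ctr[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };

        ctr64_inc(ctr);                 /* ...00ff + 1 = ...0100 */
        assert(ctr[6] == 0x01 && ctr[7] == 0x00);
        return 0;
    }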
#if defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
# include "ppc_arch.h"
# ifdef VPAES_ASM
@@ -950,6 +966,1521 @@ static const EVP_CIPHER aes_##keylen##_##mode = { \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }
+#elif defined(OPENSSL_CPUID_OBJ) && defined(__s390__)
+/*
+ * IBM S390X support
+ */
+# include "s390x_arch.h"
+
+typedef struct {
+ union {
+ double align;
+ /*-
+ * KM-AES parameter block - begin
+ * (see z/Architecture Principles of Operation >= SA22-7832-06)
+ */
+ struct {
+ unsigned char k[32];
+ } param;
+ /* KM-AES parameter block - end */
+ } km;
+ unsigned int fc;
+} S390X_AES_ECB_CTX;
+
+typedef struct {
+ union {
+ double align;
+ /*-
+ * KMO-AES parameter block - begin
+ * (see z/Architecture Principles of Operation >= SA22-7832-08)
+ */
+ struct {
+ unsigned char cv[16];
+ unsigned char k[32];
+ } param;
+ /* KMO-AES parameter block - end */
+ } kmo;
+ unsigned int fc;
+
+ int res;
+} S390X_AES_OFB_CTX;
+
+typedef struct {
+ union {
+ double align;
+ /*-
+ * KMF-AES parameter block - begin
+ * (see z/Architecture Principles of Operation >= SA22-7832-08)
+ */
+ struct {
+ unsigned char cv[16];
+ unsigned char k[32];
+ } param;
+ /* KMF-AES parameter block - end */
+ } kmf;
+ unsigned int fc;
+
+ int res;
+} S390X_AES_CFB_CTX;
+
+typedef struct {
+ union {
+ double align;
+ /*-
+ * KMA-GCM-AES parameter block - begin
+ * (see z/Architecture Principles of Operation >= SA22-7832-11)
+ */
+ struct {
+ unsigned char reserved[12];
+ union {
+ unsigned int w;
+ unsigned char b[4];
+ } cv;
+ union {
+ unsigned long long g[2];
+ unsigned char b[16];
+ } t;
+ unsigned char h[16];
+ unsigned long long taadl;
+ unsigned long long tpcl;
+ union {
+ unsigned long long g[2];
+ unsigned int w[4];
+ } j0;
+ unsigned char k[32];
+ } param;
+ /* KMA-GCM-AES parameter block - end */
+ } kma;
+ unsigned int fc;
+ int key_set;
+
+ unsigned char *iv;
+ int ivlen;
+ int iv_set;
+ int iv_gen;
+
+ int taglen;
+
+ unsigned char ares[16];
+ unsigned char mres[16];
+ unsigned char kres[16];
+ int areslen;
+ int mreslen;
+ int kreslen;
+
+ int tls_aad_len;
+} S390X_AES_GCM_CTX;
+
+typedef struct {
+ union {
+ double align;
+ /*-
+ * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
+ * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
+ * rounds field is used to store the function code and that the key
+ * schedule is not stored (if aes hardware support is detected).
+ */
+ struct {
+ unsigned char pad[16];
+ AES_KEY k;
+ } key;
+
+ struct {
+ /*-
+ * KMAC-AES parameter block - begin
+ * (see z/Architecture Principles of Operation >= SA22-7832-08)
+ */
+ struct {
+ union {
+ unsigned long long g[2];
+ unsigned char b[16];
+ } icv;
+ unsigned char k[32];
+ } kmac_param;
+ /* KMAC-AES parameter block - end */
+
+ union {
+ unsigned long long g[2];
+ unsigned char b[16];
+ } nonce;
+ union {
+ unsigned long long g[2];
+ unsigned char b[16];
+ } buf;
+
+ unsigned long long blocks;
+ int l;
+ int m;
+ int tls_aad_len;
+ int iv_set;
+ int tag_set;
+ int len_set;
+ int key_set;
+
+ unsigned char pad[140];
+ unsigned int fc;
+ } ccm;
+ } aes;
+} S390X_AES_CCM_CTX;
+
+/* Convert key size to function code: [16,24,32] -> [18,19,20]. */
+# define S390X_AES_FC(keylen) (S390X_AES_128 + ((((keylen) << 3) - 128) >> 6))
+
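Illustration (not part of the diff): the macro realizes the mapping in the comment; taking S390X_AES_128 == 18 (an assumption here, matching the KM function codes for AES), key lengths 16/24/32 land on function codes 18/19/20:

    #include <assert.h>

    #define S390X_AES_128 18  /* assumed KM function code for AES-128 */
    #define S390X_AES_FC(keylen) (S390X_AES_128 + ((((keylen) << 3) - 128) >> 6))

    int main(void)
    {
        assert(S390X_AES_FC(16) == 18); /* AES-128 */
        assert(S390X_AES_FC(24) == 19); /* AES-192 */
        assert(S390X_AES_FC(32) == 20); /* AES-256 */
        return 0;
    }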
+/* Most modes of operation need km for partial block processing. */
+# define S390X_aes_128_CAPABLE (OPENSSL_s390xcap_P.km[0] & \
+ S390X_CAPBIT(S390X_AES_128))
+# define S390X_aes_192_CAPABLE (OPENSSL_s390xcap_P.km[0] & \
+ S390X_CAPBIT(S390X_AES_192))
+# define S390X_aes_256_CAPABLE (OPENSSL_s390xcap_P.km[0] & \
+ S390X_CAPBIT(S390X_AES_256))
+
+# define s390x_aes_init_key aes_init_key
+static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+ const unsigned char *iv, int enc);
+
+# define S390X_aes_128_cbc_CAPABLE 1 /* checked by callee */
+# define S390X_aes_192_cbc_CAPABLE 1
+# define S390X_aes_256_cbc_CAPABLE 1
+# define S390X_AES_CBC_CTX EVP_AES_KEY
+
+# define s390x_aes_cbc_init_key aes_init_key
+
+# define s390x_aes_cbc_cipher aes_cbc_cipher
+static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len);
+
+# define S390X_aes_128_ecb_CAPABLE S390X_aes_128_CAPABLE
+# define S390X_aes_192_ecb_CAPABLE S390X_aes_192_CAPABLE
+# define S390X_aes_256_ecb_CAPABLE S390X_aes_256_CAPABLE
+
+static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
+ const unsigned char *key,
+ const unsigned char *iv, int enc)
+{
+ S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
+ const int keylen = EVP_CIPHER_CTX_key_length(ctx);
+
+ cctx->fc = S390X_AES_FC(keylen);
+ if (!enc)
+ cctx->fc |= S390X_DECRYPT;
+
+ memcpy(cctx->km.param.k, key, keylen);
+ return 1;
+}
+
+static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
+
+ s390x_km(in, len, out, cctx->fc, &cctx->km.param);
+ return 1;
+}
+
+# define S390X_aes_128_ofb_CAPABLE (S390X_aes_128_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmo[0] & \
+ S390X_CAPBIT(S390X_AES_128)))
+# define S390X_aes_192_ofb_CAPABLE (S390X_aes_192_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmo[0] & \
+ S390X_CAPBIT(S390X_AES_192)))
+# define S390X_aes_256_ofb_CAPABLE (S390X_aes_256_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmo[0] & \
+ S390X_CAPBIT(S390X_AES_256)))
+
+static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
+ const unsigned char *key,
+ const unsigned char *ivec, int enc)
+{
+ S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
+ const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
+ const int keylen = EVP_CIPHER_CTX_key_length(ctx);
+ const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);
+
+ memcpy(cctx->kmo.param.cv, iv, ivlen);
+ memcpy(cctx->kmo.param.k, key, keylen);
+ cctx->fc = S390X_AES_FC(keylen);
+ cctx->res = 0;
+ return 1;
+}
+
+static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
+ int n = cctx->res;
+ int rem;
+
+ while (n && len) {
+ *out = *in ^ cctx->kmo.param.cv[n];
+ n = (n + 1) & 0xf;
+ --len;
+ ++in;
+ ++out;
+ }
+
+ rem = len & 0xf;
+
+ len &= ~(size_t)0xf;
+ if (len) {
+ s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);
+
+ out += len;
+ in += len;
+ }
+
+ if (rem) {
+ s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
+ cctx->kmo.param.k);
+
+ while (rem--) {
+ out[n] = in[n] ^ cctx->kmo.param.cv[n];
+ ++n;
+ }
+ }
+
+ cctx->res = n;
+ return 1;
+}
+
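Illustration (not part of the diff): the cipher routine above follows a three-phase pattern that recurs throughout this patch -- drain leftover keystream bytes, hand whole 16-byte blocks to the CPACF instruction, then generate one extra keystream block for the tail. A portable model of the same control flow, with a toy block function standing in for KM/KMO:

    #include <stddef.h>

    /* toy stand-in for cv = AES_k(cv); NOT a real cipher */
    static void E(unsigned char cv[16])
    {
        for (int i = 0; i < 16; i++)
            cv[i] = (unsigned char)(cv[i] * 5 + 1);
    }

    static void ofb_xor(unsigned char *out, const unsigned char *in,
                        size_t len, unsigned char cv[16], int *res)
    {
        int n = *res;
        size_t rem;

        while (n && len) {              /* phase 1: leftover keystream */
            *out++ = *in++ ^ cv[n];
            n = (n + 1) & 0xf;
            --len;
        }
        rem = len & 0xf;
        for (len &= ~(size_t)0xf; len; len -= 16) { /* phase 2: whole blocks */
            E(cv);                      /* kmo does this in one instruction */
            for (int i = 0; i < 16; i++)
                out[i] = in[i] ^ cv[i];
            out += 16;
            in += 16;
        }
        if (rem) {                      /* phase 3: one block for the tail */
            E(cv);
            while (rem--) {
                out[n] = in[n] ^ cv[n];
                ++n;
            }
        }
        *res = n;                       /* keystream bytes cv[n..15] remain */
    }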
+# define S390X_aes_128_cfb_CAPABLE (S390X_aes_128_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_128)))
+# define S390X_aes_192_cfb_CAPABLE (S390X_aes_192_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_192)))
+# define S390X_aes_256_cfb_CAPABLE (S390X_aes_256_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_256)))
+
+static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
+ const unsigned char *key,
+ const unsigned char *ivec, int enc)
+{
+ S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
+ const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
+ const int keylen = EVP_CIPHER_CTX_key_length(ctx);
+ const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);
+
+ cctx->fc = S390X_AES_FC(keylen);
+ cctx->fc |= 16 << 24; /* 16 bytes cipher feedback */
+ if (!enc)
+ cctx->fc |= S390X_DECRYPT;
+
+ cctx->res = 0;
+ memcpy(cctx->kmf.param.cv, iv, ivlen);
+ memcpy(cctx->kmf.param.k, key, keylen);
+ return 1;
+}
+
+static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
+ const int keylen = EVP_CIPHER_CTX_key_length(ctx);
+ const int enc = EVP_CIPHER_CTX_encrypting(ctx);
+ int n = cctx->res;
+ int rem;
+ unsigned char tmp;
+
+ while (n && len) {
+ tmp = *in;
+ *out = cctx->kmf.param.cv[n] ^ tmp;
+ cctx->kmf.param.cv[n] = enc ? *out : tmp;
+ n = (n + 1) & 0xf;
+ --len;
+ ++in;
+ ++out;
+ }
+
+ rem = len & 0xf;
+
+ len &= ~(size_t)0xf;
+ if (len) {
+ s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
+
+ out += len;
+ in += len;
+ }
+
+ if (rem) {
+ s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
+ S390X_AES_FC(keylen), cctx->kmf.param.k);
+
+ while (rem--) {
+ tmp = in[n];
+ out[n] = cctx->kmf.param.cv[n] ^ tmp;
+ cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
+ ++n;
+ }
+ }
+
+ cctx->res = n;
+ return 1;
+}
+
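Illustration (not part of the diff): unlike OFB, the CFB feedback register absorbs the ciphertext byte, so encryption feeds back *out while decryption feeds back the saved input byte (input and output may alias). The per-byte step modeled in isolation:

    /* one byte of CFB-128: keystream byte is cv[n]; ciphertext feeds back */
    static unsigned char cfb_byte(unsigned char *cvn, unsigned char in, int enc)
    {
        unsigned char tmp = in;         /* save first: in may alias out */
        unsigned char out = *cvn ^ tmp;

        *cvn = enc ? out : tmp;         /* feedback = ciphertext byte */
        return out;
    }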
+# define S390X_aes_128_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_128))
+# define S390X_aes_192_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_192))
+# define S390X_aes_256_cfb8_CAPABLE (OPENSSL_s390xcap_P.kmf[0] & \
+ S390X_CAPBIT(S390X_AES_256))
+
+static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
+ const unsigned char *key,
+ const unsigned char *ivec, int enc)
+{
+ S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
+ const unsigned char *iv = EVP_CIPHER_CTX_original_iv(ctx);
+ const int keylen = EVP_CIPHER_CTX_key_length(ctx);
+ const int ivlen = EVP_CIPHER_CTX_iv_length(ctx);
+
+ cctx->fc = S390X_AES_FC(keylen);
+ cctx->fc |= 1 << 24; /* 1 byte cipher feedback */
+ if (!enc)
+ cctx->fc |= S390X_DECRYPT;
+
+ memcpy(cctx->kmf.param.cv, iv, ivlen);
+ memcpy(cctx->kmf.param.k, key, keylen);
+ return 1;
+}
+
+static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
+
+ s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
+ return 1;
+}
+
+# define S390X_aes_128_cfb1_CAPABLE 0
+# define S390X_aes_192_cfb1_CAPABLE 0
+# define S390X_aes_256_cfb1_CAPABLE 0
+
+# define s390x_aes_cfb1_init_key aes_init_key
+
+# define s390x_aes_cfb1_cipher aes_cfb1_cipher
+static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len);
+
+# define S390X_aes_128_ctr_CAPABLE 1 /* checked by callee */
+# define S390X_aes_192_ctr_CAPABLE 1
+# define S390X_aes_256_ctr_CAPABLE 1
+# define S390X_AES_CTR_CTX EVP_AES_KEY
+
+# define s390x_aes_ctr_init_key aes_init_key
+
+# define s390x_aes_ctr_cipher aes_ctr_cipher
+static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len);
+
+# define S390X_aes_128_gcm_CAPABLE (S390X_aes_128_CAPABLE && \
+ (OPENSSL_s390xcap_P.kma[0] & \
+ S390X_CAPBIT(S390X_AES_128)))
+# define S390X_aes_192_gcm_CAPABLE (S390X_aes_192_CAPABLE && \
+ (OPENSSL_s390xcap_P.kma[0] & \
+ S390X_CAPBIT(S390X_AES_192)))
+# define S390X_aes_256_gcm_CAPABLE (S390X_aes_256_CAPABLE && \
+ (OPENSSL_s390xcap_P.kma[0] & \
+ S390X_CAPBIT(S390X_AES_256)))
+
+/* iv + padding length for iv lengths != 12 */
+# define S390X_gcm_ivpadlen(i) ((((i) + 15) >> 4 << 4) + 16)
+
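Illustration (not part of the diff): for IV lengths other than 12, GCM hashes the zero-padded IV plus one extra 16-byte block holding the IV bit length; the macro sizes that buffer by rounding up to a 16-byte boundary and adding 16:

    #include <assert.h>

    #define S390X_gcm_ivpadlen(i) ((((i) + 15) >> 4 << 4) + 16)

    int main(void)
    {
        assert(S390X_gcm_ivpadlen(13) == 32); /* 13 -> 16, plus length block */
        assert(S390X_gcm_ivpadlen(16) == 32); /* already a multiple of 16 */
        assert(S390X_gcm_ivpadlen(17) == 48);
        return 0;
    }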
+/*-
+ * Process additional authenticated data. Returns 0 on success. Code is
+ * big-endian.
+ */
+static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
+ size_t len)
+{
+ unsigned long long alen;
+ int n, rem;
+
+ if (ctx->kma.param.tpcl)
+ return -2;
+
+ alen = ctx->kma.param.taadl + len;
+ if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
+ return -1;
+ ctx->kma.param.taadl = alen;
+
+ n = ctx->areslen;
+ if (n) {
+ while (n && len) {
+ ctx->ares[n] = *aad;
+ n = (n + 1) & 0xf;
+ ++aad;
+ --len;
+ }
+ /* ctx->ares contains a complete block if offset has wrapped around */
+ if (!n) {
+ s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
+ ctx->fc |= S390X_KMA_HS;
+ }
+ ctx->areslen = n;
+ }
+
+ rem = len & 0xf;
+
+ len &= ~(size_t)0xf;
+ if (len) {
+ s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
+ aad += len;
+ ctx->fc |= S390X_KMA_HS;
+ }
+
+ if (rem) {
+ ctx->areslen = rem;
+
+ do {
+ --rem;
+ ctx->ares[rem] = aad[rem];
+ } while (rem);
+ }
+ return 0;
+}
+
+/*-
+ * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
+ * success. Code is big-endian.
+ */
+static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
+ unsigned char *out, size_t len)
+{
+ const unsigned char *inptr;
+ unsigned long long mlen;
+ union {
+ unsigned int w[4];
+ unsigned char b[16];
+ } buf;
+ size_t inlen;
+ int n, rem, i;
+
+ mlen = ctx->kma.param.tpcl + len;
+ if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
+ return -1;
+ ctx->kma.param.tpcl = mlen;
+
+ n = ctx->mreslen;
+ if (n) {
+ inptr = in;
+ inlen = len;
+ while (n && inlen) {
+ ctx->mres[n] = *inptr;
+ n = (n + 1) & 0xf;
+ ++inptr;
+ --inlen;
+ }
+ /* ctx->mres contains a complete block if offset has wrapped around */
+ if (!n) {
+ s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
+ ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
+ ctx->fc |= S390X_KMA_HS;
+ ctx->areslen = 0;
+
+ /* previous call already encrypted/decrypted its remainder,
+ * see comment below */
+ n = ctx->mreslen;
+ while (n) {
+ *out = buf.b[n];
+ n = (n + 1) & 0xf;
+ ++out;
+ ++in;
+ --len;
+ }
+ ctx->mreslen = 0;
+ }
+ }
+
+ rem = len & 0xf;
+
+ len &= ~(size_t)0xf;
+ if (len) {
+ s390x_kma(ctx->ares, ctx->areslen, in, len, out,
+ ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
+ in += len;
+ out += len;
+ ctx->fc |= S390X_KMA_HS;
+ ctx->areslen = 0;
+ }
+
+ /*-
+ * If there is a remainder, it has to be saved such that it can be
+ * processed by kma later. However, we also have to do the for-now
+ * unauthenticated encryption/decryption part here and now...
+ */
+ if (rem) {
+ if (!ctx->mreslen) {
+ buf.w[0] = ctx->kma.param.j0.w[0];
+ buf.w[1] = ctx->kma.param.j0.w[1];
+ buf.w[2] = ctx->kma.param.j0.w[2];
+ buf.w[3] = ctx->kma.param.cv.w + 1;
+ s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
+ }
+
+ n = ctx->mreslen;
+ for (i = 0; i < rem; i++) {
+ ctx->mres[n + i] = in[i];
+ out[i] = in[i] ^ ctx->kres[n + i];
+ }
+
+ ctx->mreslen += rem;
+ }
+ return 0;
+}
+
+/*-
+ * Initialize context structure. Code is big-endian.
+ */
+static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
+ const unsigned char *iv)
+{
+ ctx->kma.param.t.g[0] = 0;
+ ctx->kma.param.t.g[1] = 0;
+ ctx->kma.param.tpcl = 0;
+ ctx->kma.param.taadl = 0;
+ ctx->mreslen = 0;
+ ctx->areslen = 0;
+ ctx->kreslen = 0;
+
+ if (ctx->ivlen == 12) {
+ memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
+ ctx->kma.param.j0.w[3] = 1;
+ ctx->kma.param.cv.w = 1;
+ } else {
+ /* ctx->iv has the right size and is already padded. */
+ memcpy(ctx->iv, iv, ctx->ivlen);
+ s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
+ ctx->fc, &ctx->kma.param);
+ ctx->fc |= S390X_KMA_HS;
+
+ ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
+ ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
+ ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
+ ctx->kma.param.t.g[0] = 0;
+ ctx->kma.param.t.g[1] = 0;
+ }
+}
+
+/*-
+ * Performs various operations on the context structure depending on control
+ * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
+ * Code is big-endian.
+ */
+static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
+{
+ S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
+ S390X_AES_GCM_CTX *gctx_out;
+ EVP_CIPHER_CTX *out;
+ unsigned char *buf, *iv;
+ int ivlen, enc, len;
+
+ switch (type) {
+ case EVP_CTRL_INIT:
+ ivlen = EVP_CIPHER_CTX_iv_length(c);
+ iv = EVP_CIPHER_CTX_iv_noconst(c);
+ gctx->key_set = 0;
+ gctx->iv_set = 0;
+ gctx->ivlen = ivlen;
+ gctx->iv = iv;
+ gctx->taglen = -1;
+ gctx->iv_gen = 0;
+ gctx->tls_aad_len = -1;
+ return 1;
+
+ case EVP_CTRL_AEAD_SET_IVLEN:
+ if (arg <= 0)
+ return 0;
+
+ if (arg != 12) {
+ iv = EVP_CIPHER_CTX_iv_noconst(c);
+ len = S390X_gcm_ivpadlen(arg);
+
+ /* Allocate memory for iv if needed. */
+ if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
+ if (gctx->iv != iv)
+ OPENSSL_free(gctx->iv);
+
+ if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
+ EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
+ return 0;
+ }
+ }
+ /* Add padding. */
+ memset(gctx->iv + arg, 0, len - arg - 8);
+ *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
+ }
+ gctx->ivlen = arg;
+ return 1;
+
+ case EVP_CTRL_AEAD_SET_TAG:
+ buf = EVP_CIPHER_CTX_buf_noconst(c);
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (arg <= 0 || arg > 16 || enc)
+ return 0;
+
+ memcpy(buf, ptr, arg);
+ gctx->taglen = arg;
+ return 1;
+
+ case EVP_CTRL_AEAD_GET_TAG:
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
+ return 0;
+
+ memcpy(ptr, gctx->kma.param.t.b, arg);
+ return 1;
+
+ case EVP_CTRL_GCM_SET_IV_FIXED:
+ /* Special case: -1 length restores whole iv */
+ if (arg == -1) {
+ memcpy(gctx->iv, ptr, gctx->ivlen);
+ gctx->iv_gen = 1;
+ return 1;
+ }
+ /*
+ * Fixed field must be at least 4 bytes and invocation field at least
+ * 8.
+ */
+ if ((arg < 4) || (gctx->ivlen - arg) < 8)
+ return 0;
+
+ if (arg)
+ memcpy(gctx->iv, ptr, arg);
+
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
+ return 0;
+
+ gctx->iv_gen = 1;
+ return 1;
+
+ case EVP_CTRL_GCM_IV_GEN:
+ if (gctx->iv_gen == 0 || gctx->key_set == 0)
+ return 0;
+
+ s390x_aes_gcm_setiv(gctx, gctx->iv);
+
+ if (arg <= 0 || arg > gctx->ivlen)
+ arg = gctx->ivlen;
+
+ memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
+ /*
+ * Invocation field will be at least 8 bytes in size and so no need
+ * to check wrap around or increment more than last 8 bytes.
+ */
+ ctr64_inc(gctx->iv + gctx->ivlen - 8);
+ gctx->iv_set = 1;
+ return 1;
+
+ case EVP_CTRL_GCM_SET_IV_INV:
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
+ return 0;
+
+ memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
+ s390x_aes_gcm_setiv(gctx, gctx->iv);
+ gctx->iv_set = 1;
+ return 1;
+
+ case EVP_CTRL_AEAD_TLS1_AAD:
+ /* Save the aad for later use. */
+ if (arg != EVP_AEAD_TLS1_AAD_LEN)
+ return 0;
+
+ buf = EVP_CIPHER_CTX_buf_noconst(c);
+ memcpy(buf, ptr, arg);
+ gctx->tls_aad_len = arg;
+
+ len = buf[arg - 2] << 8 | buf[arg - 1];
+ /* Correct length for explicit iv. */
+ if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
+ return 0;
+ len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
+
+ /* If decrypting correct for tag too. */
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (!enc) {
+ if (len < EVP_GCM_TLS_TAG_LEN)
+ return 0;
+ len -= EVP_GCM_TLS_TAG_LEN;
+ }
+ buf[arg - 2] = len >> 8;
+ buf[arg - 1] = len & 0xff;
+ /* Extra padding: tag appended to record. */
+ return EVP_GCM_TLS_TAG_LEN;
+
+ case EVP_CTRL_COPY:
+ out = ptr;
+ gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);
+ iv = EVP_CIPHER_CTX_iv_noconst(c);
+
+ if (gctx->iv == iv) {
+ gctx_out->iv = EVP_CIPHER_CTX_iv_noconst(out);
+ } else {
+ len = S390X_gcm_ivpadlen(gctx->ivlen);
+
+ if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
+ EVPerr(EVP_F_S390X_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
+ return 0;
+ }
+
+ memcpy(gctx_out->iv, gctx->iv, len);
+ }
+ return 1;
+
+ default:
+ return -1;
+ }
+}
+
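Illustration (not part of the diff): in the EVP_CTRL_AEAD_TLS1_AAD case above, the last two AAD bytes carry the TLS record length, which must be rewritten from the wire length to the plaintext length -- the 8-byte explicit IV always comes off, and the 16-byte tag as well when decrypting. A worked example with those standard constants:

    #include <assert.h>

    int main(void)
    {
        unsigned char aad[13] = { 0 }; /* seq(8) | type(1) | version(2) | len(2) */
        int len;

        aad[11] = 0x01;                 /* wire length 0x0118 = 280 */
        aad[12] = 0x18;

        len = aad[11] << 8 | aad[12];
        len -= 8;                       /* EVP_GCM_TLS_EXPLICIT_IV_LEN */
        len -= 16;                      /* EVP_GCM_TLS_TAG_LEN (decrypt side) */
        aad[11] = (unsigned char)(len >> 8);
        aad[12] = (unsigned char)(len & 0xff);

        assert(len == 256);
        return 0;
    }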
+/*-
+ * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
+ */
+static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
+ const unsigned char *key,
+ const unsigned char *iv, int enc)
+{
+ S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
+ int keylen;
+
+ if (iv == NULL && key == NULL)
+ return 1;
+
+ if (key != NULL) {
+ keylen = EVP_CIPHER_CTX_key_length(ctx);
+ memcpy(&gctx->kma.param.k, key, keylen);
+
+ gctx->fc = S390X_AES_FC(keylen);
+ if (!enc)
+ gctx->fc |= S390X_DECRYPT;
+
+ if (iv == NULL && gctx->iv_set)
+ iv = gctx->iv;
+
+ if (iv != NULL) {
+ s390x_aes_gcm_setiv(gctx, iv);
+ gctx->iv_set = 1;
+ }
+ gctx->key_set = 1;
+ } else {
+ if (gctx->key_set)
+ s390x_aes_gcm_setiv(gctx, iv);
+ else
+ memcpy(gctx->iv, iv, gctx->ivlen);
+
+ gctx->iv_set = 1;
+ gctx->iv_gen = 0;
+ }
+ return 1;
+}
+
+/*-
+ * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
+ * if successful. Otherwise -1 is returned. Code is big-endian.
+ */
+static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
+ const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
+ const int enc = EVP_CIPHER_CTX_encrypting(ctx);
+ int rv = -1;
+
+ if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
+ return -1;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
+ : EVP_CTRL_GCM_SET_IV_INV,
+ EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
+ goto err;
+
+ in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+ out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+ len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
+
+ gctx->kma.param.taadl = gctx->tls_aad_len << 3;
+ gctx->kma.param.tpcl = len << 3;
+ s390x_kma(buf, gctx->tls_aad_len, in, len, out,
+ gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
+
+ if (enc) {
+ memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
+ rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
+ } else {
+ if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
+ EVP_GCM_TLS_TAG_LEN)) {
+ OPENSSL_cleanse(out, len);
+ goto err;
+ }
+ rv = len;
+ }
+err:
+ gctx->iv_set = 0;
+ gctx->tls_aad_len = -1;
+ return rv;
+}
+
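Illustration (not part of the diff): the in-place TLS path above assumes the TLS 1.2 AES-GCM record body layout, explicit IV | payload | tag, and the taadl/tpcl fields take bit lengths, hence the << 3:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t reclen  = 8 + 100 + 16;  /* explicit IV | payload | tag */
        size_t payload = reclen - 8 - 16;

        assert(payload == 100);
        assert((payload << 3) == 800);  /* tpcl is a bit count */
        return 0;
    }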
+/*-
+ * Called from EVP layer to initialize context, process additional
+ * authenticated data, en/de-crypt plain/cipher-text and authenticate
+ * ciphertext or process a TLS packet, depending on context. Returns bytes
+ * written on success. Otherwise -1 is returned. Code is big-endian.
+ */
+static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
+ unsigned char *buf, tmp[16];
+ int enc;
+
+ if (!gctx->key_set)
+ return -1;
+
+ if (gctx->tls_aad_len >= 0)
+ return s390x_aes_gcm_tls_cipher(ctx, out, in, len);
+
+ if (!gctx->iv_set)
+ return -1;
+
+ if (in != NULL) {
+ if (out == NULL) {
+ if (s390x_aes_gcm_aad(gctx, in, len))
+ return -1;
+ } else {
+ if (s390x_aes_gcm(gctx, in, out, len))
+ return -1;
+ }
+ return len;
+ } else {
+ gctx->kma.param.taadl <<= 3;
+ gctx->kma.param.tpcl <<= 3;
+ s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
+ gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
+ /* recall that we already did en-/decrypt gctx->mres
+ * and returned it to caller... */
+ OPENSSL_cleanse(tmp, gctx->mreslen);
+ gctx->iv_set = 0;
+
+ enc = EVP_CIPHER_CTX_encrypting(ctx);
+ if (enc) {
+ gctx->taglen = 16;
+ } else {
+ if (gctx->taglen < 0)
+ return -1;
+
+ buf = EVP_CIPHER_CTX_buf_noconst(ctx);
+ if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
+ return -1;
+ }
+ return 0;
+ }
+}
+
+static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
+{
+ S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
+ const unsigned char *iv;
+
+ if (gctx == NULL)
+ return 0;
+
+ iv = EVP_CIPHER_CTX_iv(c);
+ if (iv != gctx->iv)
+ OPENSSL_free(gctx->iv);
+
+ OPENSSL_cleanse(gctx, sizeof(*gctx));
+ return 1;
+}
+
+# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX
+# define S390X_aes_128_xts_CAPABLE 1 /* checked by callee */
+# define S390X_aes_256_xts_CAPABLE 1
+
+# define s390x_aes_xts_init_key aes_xts_init_key
+static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
+ const unsigned char *key,
+ const unsigned char *iv, int enc);
+# define s390x_aes_xts_cipher aes_xts_cipher
+static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len);
+# define s390x_aes_xts_ctrl aes_xts_ctrl
+static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
+# define s390x_aes_xts_cleanup aes_xts_cleanup
+
+# define S390X_aes_128_ccm_CAPABLE (S390X_aes_128_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmac[0] & \
+ S390X_CAPBIT(S390X_AES_128)))
+# define S390X_aes_192_ccm_CAPABLE (S390X_aes_192_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmac[0] & \
+ S390X_CAPBIT(S390X_AES_192)))
+# define S390X_aes_256_ccm_CAPABLE (S390X_aes_256_CAPABLE && \
+ (OPENSSL_s390xcap_P.kmac[0] & \
+ S390X_CAPBIT(S390X_AES_256)))
+
+# define S390X_CCM_AAD_FLAG 0x40
+
+/*-
+ * Set nonce and length fields. Code is big-endian.
+ */
+static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
+ const unsigned char *nonce,
+ size_t mlen)
+{
+ ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
+ ctx->aes.ccm.nonce.g[1] = mlen;
+ memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
+}
+
+/*-
+ * Process additional authenticated data. Code is big-endian.
+ */
+static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
+ size_t alen)
+{
+ unsigned char *ptr;
+ int i, rem;
+
+ if (!alen)
+ return;
+
+ ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;
+
+ /* Suppress 'type-punned pointer dereference' warning. */
+ ptr = ctx->aes.ccm.buf.b;
+
+ if (alen < ((1 << 16) - (1 << 8))) {
+ *(uint16_t *)ptr = alen;
+ i = 2;
+ } else if (sizeof(alen) == 8
+ && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
+ *(uint16_t *)ptr = 0xffff;
+ *(uint64_t *)(ptr + 2) = alen;
+ i = 10;
+ } else {
+ *(uint16_t *)ptr = 0xfffe;
+ *(uint32_t *)(ptr + 2) = alen;
+ i = 6;
+ }
+
+ while (i < 16 && alen) {
+ ctx->aes.ccm.buf.b[i] = *aad;
+ ++aad;
+ --alen;
+ ++i;
+ }
+ while (i < 16) {
+ ctx->aes.ccm.buf.b[i] = 0;
+ ++i;
+ }
+
+ ctx->aes.ccm.kmac_param.icv.g[0] = 0;
+ ctx->aes.ccm.kmac_param.icv.g[1] = 0;
+ s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
+ &ctx->aes.ccm.kmac_param);
+ ctx->aes.ccm.blocks += 2;
+
+ rem = alen & 0xf;
+ alen &= ~(size_t)0xf;
+ if (alen) {
+ s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
+ ctx->aes.ccm.blocks += alen >> 4;
+ aad += alen;
+ }
+ if (rem) {
+ for (i = 0; i < rem; i++)
+ ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];
+
+ s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
+ ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
+ ctx->aes.ccm.kmac_param.k);
+ ctx->aes.ccm.blocks++;
+ }
+}
+
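Illustration (not part of the diff): the AAD length prefix above follows RFC 3610, and the direct uint16_t/uint32_t/uint64_t stores rely on s390x being big-endian. A portable equivalent of the three encodings:

    #include <stdint.h>

    /* writes the RFC 3610 a-length prefix into p (>= 10 bytes), returns size */
    static int ccm_encode_alen(unsigned char *p, uint64_t alen)
    {
        int i;

        if (alen < 0xff00) {                    /* 2-byte form */
            p[0] = (unsigned char)(alen >> 8);
            p[1] = (unsigned char)alen;
            return 2;
        } else if (alen <= 0xffffffffULL) {     /* 0xff 0xfe || 32-bit form */
            p[0] = 0xff;
            p[1] = 0xfe;
            for (i = 0; i < 4; i++)
                p[2 + i] = (unsigned char)(alen >> (24 - 8 * i));
            return 6;
        } else {                                /* 0xff 0xff || 64-bit form */
            p[0] = 0xff;
            p[1] = 0xff;
            for (i = 0; i < 8; i++)
                p[2 + i] = (unsigned char)(alen >> (56 - 8 * i));
            return 10;
        }
    }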
+/*-
+ * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
+ * success.
+ */
+static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
+ unsigned char *out, size_t len, int enc)
+{
+ size_t n, rem;
+ unsigned int i, l, num;
+ unsigned char flags;
+
+ flags = ctx->aes.ccm.nonce.b[0];
+ if (!(flags & S390X_CCM_AAD_FLAG)) {
+ s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
+ ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
+ ctx->aes.ccm.blocks++;
+ }
+ l = flags & 0x7;
+ ctx->aes.ccm.nonce.b[0] = l;
+
+ /*-
+ * Reconstruct length from encoded length field
+ * and initialize it with counter value.
+ */
+ n = 0;
+ for (i = 15 - l; i < 15; i++) {
+ n |= ctx->aes.ccm.nonce.b[i];
+ ctx->aes.ccm.nonce.b[i] = 0;
+ n <<= 8;
+ }
+ n |= ctx->aes.ccm.nonce.b[15];
+ ctx->aes.ccm.nonce.b[15] = 1;
+
+ if (n != len)
+ return -1; /* length mismatch */
+
+ if (enc) {
+ /* Two operations per block plus one for tag encryption */
+ ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
+ if (ctx->aes.ccm.blocks > (1ULL << 61))
+ return -2; /* too much data */
+ }
+
+ num = 0;
+ rem = len & 0xf;
+ len &= ~(size_t)0xf;
+
+ if (enc) {
+ /* mac-then-encrypt */
+ if (len)
+ s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
+ if (rem) {
+ for (i = 0; i < rem; i++)
+ ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];
+
+ s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
+ ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
+ ctx->aes.ccm.kmac_param.k);
+ }
+
+ CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
+ ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
+ &num, (ctr128_f)AES_ctr32_encrypt);
+ } else {
+ /* decrypt-then-mac */
+ CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
+ ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
+ &num, (ctr128_f)AES_ctr32_encrypt);
+
+ if (len)
+ s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
+ if (rem) {
+ for (i = 0; i < rem; i++)
+ ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];
+
+ s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
+ ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
+ ctx->aes.ccm.kmac_param.k);
+ }
+ }
+ /* encrypt tag */
+ for (i = 15 - l; i < 16; i++)
+ ctx->aes.ccm.nonce.b[i] = 0;
+
+ s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
+ ctx->aes.ccm.kmac_param.k);
+ ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
+ ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];
+
+ ctx->aes.ccm.nonce.b[0] = flags; /* restore flags field */
+ return 0;
+}
+
+/*-
+ * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
+ * if successful. Otherwise -1 is returned.
+ */
+static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
+ unsigned char *ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
+ unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
+ const int enc = EVP_CIPHER_CTX_encrypting(ctx);
+
+ if (out != in
+ || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
+ return -1;
+
+ if (enc) {
+ /* Set explicit iv (sequence number). */
+ memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
+ }
+
+ len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
+ /*-
+ * Get explicit iv (sequence number). We already have fixed iv
+ * (server/client_write_iv) here.
+ */
+ memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
+ s390x_aes_ccm_setiv(cctx, ivec, len);
+
+ /* Process aad (sequence number|type|version|length) */
+ s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);
+
+ in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
+ out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
+
+ if (enc) {
+ if (s390x_aes_ccm(cctx, in, out, len, enc))
+ return -1;
+
+ memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
+ return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
+ } else {
+ if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
+ if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
+ cctx->aes.ccm.m))
+ return len;
+ }
+
+ OPENSSL_cleanse(out, len);
+ return -1;
+ }
+}
+
+/*-
+ * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
+ * returned.
+ */
+static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
+ const unsigned char *key,
+ const unsigned char *iv, int enc)
+{
+ S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
+ unsigned char *ivec;
+ int keylen;
+
+ if (iv == NULL && key == NULL)
+ return 1;
+
+ if (key != NULL) {
+ keylen = EVP_CIPHER_CTX_key_length(ctx);
+ cctx->aes.ccm.fc = S390X_AES_FC(keylen);
+ memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);
+
+ /* Store encoded m and l. */
+ cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
+ | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
+ memset(cctx->aes.ccm.nonce.b + 1, 0,
+ sizeof(cctx->aes.ccm.nonce.b) - 1);
+ cctx->aes.ccm.blocks = 0;
+
+ cctx->aes.ccm.key_set = 1;
+ }
+
+ if (iv != NULL) {
+ ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
+ memcpy(ivec, iv, 15 - cctx->aes.ccm.l);
+
+ cctx->aes.ccm.iv_set = 1;
+ }
+
+ return 1;
+}
+
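Illustration (not part of the diff): the first nonce byte packs the RFC 3610 parameters -- L-1 in bits 0-2, (M-2)/2 in bits 3-5 -- with bit 6 reserved for the Adata flag that s390x_aes_ccm_aad sets later. Checked with the defaults l = 8, m = 12 from EVP_CTRL_INIT:

    #include <assert.h>

    int main(void)
    {
        int l = 8, m = 12;              /* defaults set in EVP_CTRL_INIT */
        unsigned char b0 = ((l - 1) & 0x7) | ((((m - 2) >> 1) & 0x7) << 3);

        assert(b0 == 0x2f);             /* B_0 flags byte, no AAD */
        assert((b0 | 0x40) == 0x6f);    /* with the Adata bit set */
        return 0;
    }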
+/*-
+ * Called from EVP layer to initialize context, process additional
+ * authenticated data, en/de-crypt plain/cipher-text and authenticate
+ * plaintext or process a TLS packet, depending on context. Returns bytes
+ * written on success. Otherwise -1 is returned.
+ */
+static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len)
+{
+ S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
+ const int enc = EVP_CIPHER_CTX_encrypting(ctx);
+ int rv;
+ unsigned char *buf, *ivec;
+
+ if (!cctx->aes.ccm.key_set)
+ return -1;
+
+ if (cctx->aes.ccm.tls_aad_len >= 0)
+ return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
+
+ /*-
+ * Final(): Does not return any data. Recall that ccm is mac-then-encrypt
+ * so integrity must be checked already at Update() i.e., before
+ * potentially corrupted data is output.
+ */
+ if (in == NULL && out != NULL)
+ return 0;
+
+ if (!cctx->aes.ccm.iv_set)
+ return -1;
+
+ if (!enc && !cctx->aes.ccm.tag_set)
+ return -1;
+
+ if (out == NULL) {
+ /* Update(): Pass message length. */
+ if (in == NULL) {
+ ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
+ s390x_aes_ccm_setiv(cctx, ivec, len);
+
+ cctx->aes.ccm.len_set = 1;
+ return len;
+ }
+
+ /* Update(): Process aad. */
+ if (!cctx->aes.ccm.len_set && len)
+ return -1;
+
+ s390x_aes_ccm_aad(cctx, in, len);
+ return len;
+ }
+
+ /* Update(): Process message. */
+
+ if (!cctx->aes.ccm.len_set) {
+ /*-
+ * In case message length was not previously set explicitly via
+ * Update(), set it now.
+ */
+ ivec = EVP_CIPHER_CTX_iv_noconst(ctx);
+ s390x_aes_ccm_setiv(cctx, ivec, len);
+
+ cctx->aes.ccm.len_set = 1;
+ }
+
+ if (enc) {
+ if (s390x_aes_ccm(cctx, in, out, len, enc))
+ return -1;
+
+ cctx->aes.ccm.tag_set = 1;
+ return len;
+ } else {
+ rv = -1;
+
+ if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
+ buf = EVP_CIPHER_CTX_buf_noconst(ctx);
+ if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
+ cctx->aes.ccm.m))
+ rv = len;
+ }
+
+ if (rv == -1)
+ OPENSSL_cleanse(out, len);
+
+ cctx->aes.ccm.iv_set = 0;
+ cctx->aes.ccm.tag_set = 0;
+ cctx->aes.ccm.len_set = 0;
+ return rv;
+ }
+}
+
+/*-
+ * Performs various operations on the context structure depending on control
+ * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
+ * Code is big-endian.
+ */
+static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
+{
+ S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
+ unsigned char *buf, *iv;
+ int enc, len;
+
+ switch (type) {
+ case EVP_CTRL_INIT:
+ cctx->aes.ccm.key_set = 0;
+ cctx->aes.ccm.iv_set = 0;
+ cctx->aes.ccm.l = 8;
+ cctx->aes.ccm.m = 12;
+ cctx->aes.ccm.tag_set = 0;
+ cctx->aes.ccm.len_set = 0;
+ cctx->aes.ccm.tls_aad_len = -1;
+ return 1;
+
+ case EVP_CTRL_AEAD_TLS1_AAD:
+ if (arg != EVP_AEAD_TLS1_AAD_LEN)
+ return 0;
+
+ /* Save the aad for later use. */
+ buf = EVP_CIPHER_CTX_buf_noconst(c);
+ memcpy(buf, ptr, arg);
+ cctx->aes.ccm.tls_aad_len = arg;
+
+ len = buf[arg - 2] << 8 | buf[arg - 1];
+ if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
+ return 0;
+
+ /* Correct length for explicit iv. */
+ len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
+
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (!enc) {
+ if (len < cctx->aes.ccm.m)
+ return 0;
+
+ /* Correct length for tag. */
+ len -= cctx->aes.ccm.m;
+ }
+
+ buf[arg - 2] = len >> 8;
+ buf[arg - 1] = len & 0xff;
+
+ /* Extra padding: tag appended to record. */
+ return cctx->aes.ccm.m;
+
+ case EVP_CTRL_CCM_SET_IV_FIXED:
+ if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
+ return 0;
+
+ /* Copy to first part of the iv. */
+ iv = EVP_CIPHER_CTX_iv_noconst(c);
+ memcpy(iv, ptr, arg);
+ return 1;
+
+ case EVP_CTRL_AEAD_SET_IVLEN:
+ arg = 15 - arg;
+ /* fall-through */
+
+ case EVP_CTRL_CCM_SET_L:
+ if (arg < 2 || arg > 8)
+ return 0;
+
+ cctx->aes.ccm.l = arg;
+ return 1;
+
+ case EVP_CTRL_AEAD_SET_TAG:
+ if ((arg & 1) || arg < 4 || arg > 16)
+ return 0;
+
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (enc && ptr)
+ return 0;
+
+ if (ptr) {
+ cctx->aes.ccm.tag_set = 1;
+ buf = EVP_CIPHER_CTX_buf_noconst(c);
+ memcpy(buf, ptr, arg);
+ }
+
+ cctx->aes.ccm.m = arg;
+ return 1;
+
+ case EVP_CTRL_AEAD_GET_TAG:
+ enc = EVP_CIPHER_CTX_encrypting(c);
+ if (!enc || !cctx->aes.ccm.tag_set)
+ return 0;
+
+ if (arg < cctx->aes.ccm.m)
+ return 0;
+
+ memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
+ cctx->aes.ccm.tag_set = 0;
+ cctx->aes.ccm.iv_set = 0;
+ cctx->aes.ccm.len_set = 0;
+ return 1;
+
+ case EVP_CTRL_COPY:
+ return 1;
+
+ default:
+ return -1;
+ }
+}
+
+# define s390x_aes_ccm_cleanup aes_ccm_cleanup
+
+# ifndef OPENSSL_NO_OCB
+# define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
+# define S390X_aes_128_ocb_CAPABLE 0
+# define S390X_aes_192_ocb_CAPABLE 0
+# define S390X_aes_256_ocb_CAPABLE 0
+
+# define s390x_aes_ocb_init_key aes_ocb_init_key
+static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+ const unsigned char *iv, int enc);
+# define s390x_aes_ocb_cipher aes_ocb_cipher
+static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+ const unsigned char *in, size_t len);
+# define s390x_aes_ocb_cleanup aes_ocb_cleanup
+static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
+# define s390x_aes_ocb_ctrl aes_ocb_ctrl
+static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
+# endif
+
+# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
+ MODE,flags) \
+static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
+ nid##_##keylen##_##nmode,blocksize, \
+ keylen / 8, \
+ ivlen, \
+ flags | EVP_CIPH_##MODE##_MODE, \
+ s390x_aes_##mode##_init_key, \
+ s390x_aes_##mode##_cipher, \
+ NULL, \
+ sizeof(S390X_AES_##MODE##_CTX), \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL \
+}; \
+static const EVP_CIPHER aes_##keylen##_##mode = { \
+ nid##_##keylen##_##nmode, \
+ blocksize, \
+ keylen / 8, \
+ ivlen, \
+ flags | EVP_CIPH_##MODE##_MODE, \
+ aes_init_key, \
+ aes_##mode##_cipher, \
+ NULL, \
+ sizeof(EVP_AES_KEY), \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL \
+}; \
+const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
+{ \
+ return S390X_aes_##keylen##_##mode##_CAPABLE ? \
+ &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
+}
+
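Illustration (not part of the diff): each use of this macro emits a CPACF-backed cipher, a software fallback, and a getter that chooses between them at run time; for one combination the getter expands to roughly:

    /* sketch of BLOCK_CIPHER_generic(..., 128, ..., ofb, OFB, ...) output */
    const EVP_CIPHER *EVP_aes_128_ofb(void)
    {
        return S390X_aes_128_ofb_CAPABLE ? &s390x_aes_128_ofb /* CPACF path */
                                         : &aes_128_ofb;      /* sw fallback */
    }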
+# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
+static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
+ nid##_##keylen##_##mode, \
+ blocksize, \
+ (EVP_CIPH_##MODE##_MODE == EVP_CIPH_XTS_MODE ? 2 : 1) * keylen / 8, \
+ ivlen, \
+ flags | EVP_CIPH_##MODE##_MODE, \
+ s390x_aes_##mode##_init_key, \
+ s390x_aes_##mode##_cipher, \
+ s390x_aes_##mode##_cleanup, \
+ sizeof(S390X_AES_##MODE##_CTX), \
+ NULL, \
+ NULL, \
+ s390x_aes_##mode##_ctrl, \
+ NULL \
+}; \
+static const EVP_CIPHER aes_##keylen##_##mode = { \
+ nid##_##keylen##_##mode,blocksize, \
+ (EVP_CIPH_##MODE##_MODE == EVP_CIPH_XTS_MODE ? 2 : 1) * keylen / 8, \
+ ivlen, \
+ flags | EVP_CIPH_##MODE##_MODE, \
+ aes_##mode##_init_key, \
+ aes_##mode##_cipher, \
+ aes_##mode##_cleanup, \
+ sizeof(EVP_AES_##MODE##_CTX), \
+ NULL, \
+ NULL, \
+ aes_##mode##_ctrl, \
+ NULL \
+}; \
+const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
+{ \
+ return S390X_aes_##keylen##_##mode##_CAPABLE ? \
+ &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
+}
+
#else
# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
@@ -1278,22 +2809,6 @@ static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
return 1;
}
-/* increment counter (64-bit int) by 1 */
-static void ctr64_inc(unsigned char *counter)
-{
- int n = 8;
- unsigned char c;
-
- do {
- --n;
- c = counter[n];
- ++c;
- counter[n] = c;
- if (c)
- return;
- } while (n);
-}
-
static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
@@ -1301,8 +2816,8 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
case EVP_CTRL_INIT:
gctx->key_set = 0;
gctx->iv_set = 0;
- gctx->ivlen = EVP_CIPHER_CTX_iv_length(c);
- gctx->iv = EVP_CIPHER_CTX_iv_noconst(c);
+ gctx->ivlen = c->cipher->iv_len;
+ gctx->iv = c->iv;
gctx->taglen = -1;
gctx->iv_gen = 0;
gctx->tls_aad_len = -1;
@@ -1313,27 +2828,28 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
return 0;
/* Allocate memory for IV if needed */
if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
- if (gctx->iv != EVP_CIPHER_CTX_iv_noconst(c))
+ if (gctx->iv != c->iv)
OPENSSL_free(gctx->iv);
- gctx->iv = OPENSSL_malloc(arg);
- if (gctx->iv == NULL)
+ if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
+ EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
return 0;
+ }
}
gctx->ivlen = arg;
return 1;
case EVP_CTRL_AEAD_SET_TAG:
- if (arg <= 0 || arg > 16 || EVP_CIPHER_CTX_encrypting(c))
+ if (arg <= 0 || arg > 16 || c->encrypt)
return 0;
- memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
+ memcpy(c->buf, ptr, arg);
gctx->taglen = arg;
return 1;
case EVP_CTRL_AEAD_GET_TAG:
- if (arg <= 0 || arg > 16 || !EVP_CIPHER_CTX_encrypting(c)
+ if (arg <= 0 || arg > 16 || !c->encrypt
|| gctx->taglen < 0)
return 0;
- memcpy(ptr, EVP_CIPHER_CTX_buf_noconst(c), arg);
+ memcpy(ptr, c->buf, arg);
return 1;
case EVP_CTRL_GCM_SET_IV_FIXED:
@@ -1351,8 +2867,7 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
return 0;
if (arg)
memcpy(gctx->iv, ptr, arg);
- if (EVP_CIPHER_CTX_encrypting(c)
- && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
+ if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
return 0;
gctx->iv_gen = 1;
return 1;
@@ -1373,8 +2888,7 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
return 1;
case EVP_CTRL_GCM_SET_IV_INV:
- if (gctx->iv_gen == 0 || gctx->key_set == 0
- || EVP_CIPHER_CTX_encrypting(c))
+ if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
return 0;
memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
@@ -1385,24 +2899,22 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
/* Save the AAD for later use */
if (arg != EVP_AEAD_TLS1_AAD_LEN)
return 0;
- memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
+ memcpy(c->buf, ptr, arg);
gctx->tls_aad_len = arg;
{
- unsigned int len =
- EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
- | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
+ unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
/* Correct length for explicit IV */
if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
return 0;
len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
/* If decrypting correct for tag too */
- if (!EVP_CIPHER_CTX_encrypting(c)) {
+ if (!c->encrypt) {
if (len < EVP_GCM_TLS_TAG_LEN)
return 0;
len -= EVP_GCM_TLS_TAG_LEN;
}
- EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
- EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
+ c->buf[arg - 2] = len >> 8;
+ c->buf[arg - 1] = len & 0xff;
}
/* Extra padding: tag appended to record */
return EVP_GCM_TLS_TAG_LEN;
@@ -1416,12 +2928,13 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
return 0;
gctx_out->gcm.key = &gctx_out->ks;
}
- if (gctx->iv == EVP_CIPHER_CTX_iv_noconst(c))
- gctx_out->iv = EVP_CIPHER_CTX_iv_noconst(out);
+ if (gctx->iv == c->iv)
+ gctx_out->iv = out->iv;
else {
- gctx_out->iv = OPENSSL_malloc(gctx->ivlen);
- if (gctx_out->iv == NULL)
+ if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
+ EVPerr(EVP_F_AES_GCM_CTRL, ERR_R_MALLOC_FAILURE);
return 0;
+ }
memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
}
return 1;
@@ -1443,8 +2956,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
do {
#ifdef HWAES_CAPABLE
if (HWAES_CAPABLE) {
- HWAES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
- &gctx->ks.ks);
+ HWAES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
(block128_f) HWAES_encrypt);
# ifdef HWAES_ctr32_encrypt_blocks
@@ -1457,8 +2969,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
#endif
#ifdef BSAES_CAPABLE
if (BSAES_CAPABLE) {
- AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
- &gctx->ks.ks);
+ AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
(block128_f) AES_encrypt);
gctx->ctr = (ctr128_f) bsaes_ctr32_encrypt_blocks;
@@ -1467,8 +2978,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
#endif
#ifdef VPAES_CAPABLE
if (VPAES_CAPABLE) {
- vpaes_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
- &gctx->ks.ks);
+ vpaes_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
(block128_f) vpaes_encrypt);
gctx->ctr = NULL;
@@ -1477,8 +2987,7 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
#endif
(void)0; /* terminate potentially open 'else' */
- AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8,
- &gctx->ks.ks);
+ AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
(block128_f) AES_encrypt);
#ifdef AES_CTR_ASM
@@ -1530,19 +3039,18 @@ static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
* Set IV from start of buffer or generate IV and write to start of
* buffer.
*/
- if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CIPHER_CTX_encrypting(ctx) ?
- EVP_CTRL_GCM_IV_GEN : EVP_CTRL_GCM_SET_IV_INV,
+ if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN
+ : EVP_CTRL_GCM_SET_IV_INV,
EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
goto err;
/* Use saved AAD */
- if (CRYPTO_gcm128_aad(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx),
- gctx->tls_aad_len))
+ if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
goto err;
/* Fix buffer and length to point to payload */
in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
- if (EVP_CIPHER_CTX_encrypting(ctx)) {
+ if (ctx->encrypt) {
/* Encrypt payload */
if (gctx->ctr) {
size_t bulk = 0;
@@ -1621,11 +3129,9 @@ static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
goto err;
}
/* Retrieve tag */
- CRYPTO_gcm128_tag(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx),
- EVP_GCM_TLS_TAG_LEN);
+ CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
/* If tag mismatch wipe buffer */
- if (CRYPTO_memcmp(EVP_CIPHER_CTX_buf_noconst(ctx), in + len,
- EVP_GCM_TLS_TAG_LEN)) {
+ if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) {
OPENSSL_cleanse(out, len);
goto err;
}
@@ -1655,7 +3161,7 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
if (out == NULL) {
if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
return -1;
- } else if (EVP_CIPHER_CTX_encrypting(ctx)) {
+ } else if (ctx->encrypt) {
if (gctx->ctr) {
size_t bulk = 0;
#if defined(AES_GCM_ASM)
@@ -1746,17 +3252,15 @@ static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
}
return len;
} else {
- if (!EVP_CIPHER_CTX_encrypting(ctx)) {
+ if (!ctx->encrypt) {
if (gctx->taglen < 0)
return -1;
- if (CRYPTO_gcm128_finish(&gctx->gcm,
- EVP_CIPHER_CTX_buf_noconst(ctx),
- gctx->taglen) != 0)
+ if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
return -1;
gctx->iv_set = 0;
return 0;
}
- CRYPTO_gcm128_tag(&gctx->gcm, EVP_CIPHER_CTX_buf_noconst(ctx), 16);
+ CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
gctx->taglen = 16;
/* Don't reuse the IV */
gctx->iv_set = 0;
@@ -2132,6 +3636,10 @@ static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
if (cctx->tls_aad_len >= 0)
return aes_ccm_tls_cipher(ctx, out, in, len);
+ /* EVP_*Final() doesn't return any data */
+ if (in == NULL && out != NULL)
+ return 0;
+
if (!cctx->iv_set)
return -1;
@@ -2151,9 +3659,6 @@ static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
CRYPTO_ccm128_aad(ccm, in, len);
return len;
}
- /* EVP_*Final() doesn't return any data */
- if (!in)
- return 0;
/* If not set length yet do it */
if (!cctx->len_set) {
if (CRYPTO_ccm128_setiv(ccm, EVP_CIPHER_CTX_iv_noconst(ctx),