/* ====================================================================
 * Copyright (c) 2008 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ==================================================================== */

#include <openssl/base.h>

#include <assert.h>
#include <string.h>

#include <openssl/mem.h>
#include <openssl/cpu.h>

#include "internal.h"
#include "../../internal.h"

#if !defined(OPENSSL_NO_ASM) &&                         \
    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
     defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \
     defined(OPENSSL_PPC64LE))
#define GHASH_ASM
#endif

#define PACK(s) ((size_t)(s) << (sizeof(size_t) * 8 - 16))
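// A note on the two helpers above and below, added for clarity: PACK
// positions a 16-bit constant in the top 16 bits of a |size_t|, which is
// where the 4-bit table walk below accumulates reduction terms. REDUCE1BIT
// shifts |V| right by one bit (multiplication by x in GHASH's reflected bit
// order) and folds the shifted-out bit back in via the reduction constant
// 0xE1 << 120.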
#define REDUCE1BIT(V)                                                 \
  do {                                                                \
    if (sizeof(size_t) == 8) {                                        \
      uint64_t T = UINT64_C(0xe100000000000000) & (0 - ((V).lo & 1)); \
      (V).lo = ((V).hi << 63) | ((V).lo >> 1);                        \
      (V).hi = ((V).hi >> 1) ^ T;                                     \
    } else {                                                          \
      uint32_t T = 0xe1000000U & (0 - (uint32_t)((V).lo & 1));        \
      (V).lo = ((V).hi << 63) | ((V).lo >> 1);                        \
      (V).hi = ((V).hi >> 1) ^ ((uint64_t)T << 32);                   \
    }                                                                 \
  } while (0)

// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
// bits of a |size_t|.
static const size_t kSizeTWithoutLower4Bits = (size_t) -16;
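// For example, |len & kSizeTWithoutLower4Bits| rounds |len| down to a whole
// number of 16-byte blocks.
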
static void gcm_init_4bit(u128 Htable[16], uint64_t H[2]) {
  u128 V;

  Htable[0].hi = 0;
  Htable[0].lo = 0;
  V.hi = H[0];
  V.lo = H[1];

  Htable[8] = V;
  REDUCE1BIT(V);
  Htable[4] = V;
  REDUCE1BIT(V);
  Htable[2] = V;
  REDUCE1BIT(V);
  Htable[1] = V;
  Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
  V = Htable[4];
  Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
  Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
  Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
  V = Htable[8];
  Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
  Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
  Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
  Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
  Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
  Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
  Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;

#if defined(GHASH_ASM) && defined(OPENSSL_ARM)
  // The ARM assembler expects a specific dword order in Htable.
  for (int j = 0; j < 16; ++j) {
    V = Htable[j];
    Htable[j].hi = V.lo;
    Htable[j].lo = V.hi;
  }
#endif  // GHASH_ASM && OPENSSL_ARM
}

#if !defined(GHASH_ASM) || defined(OPENSSL_AARCH64) || defined(OPENSSL_PPC64LE)
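// rem_4bit[n] is the pre-shifted reduction value for a low nibble |n| shifted
// out of |Z| during the 4-bit table walk below (see PACK above).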
static const size_t rem_4bit[16] = {
    PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
    PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
    PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
    PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)};

static void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) {
  u128 Z;
  int cnt = 15;
  size_t rem, nlo, nhi;

  nlo = ((const uint8_t *)Xi)[15];
  nhi = nlo >> 4;
  nlo &= 0xf;

  Z.hi = Htable[nlo].hi;
  Z.lo = Htable[nlo].lo;

  while (1) {
    rem = (size_t)Z.lo & 0xf;
    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
    Z.hi = (Z.hi >> 4);
    if (sizeof(size_t) == 8) {
      Z.hi ^= rem_4bit[rem];
    } else {
      Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
    }

    Z.hi ^= Htable[nhi].hi;
    Z.lo ^= Htable[nhi].lo;

    if (--cnt < 0) {
      break;
    }

    nlo = ((const uint8_t *)Xi)[cnt];
    nhi = nlo >> 4;
    nlo &= 0xf;

    rem = (size_t)Z.lo & 0xf;
    Z.lo = (Z.hi << 60) | (Z.lo >> 4);
    Z.hi = (Z.hi >> 4);
    if (sizeof(size_t) == 8) {
      Z.hi ^= rem_4bit[rem];
    } else {
      Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
    }

    Z.hi ^= Htable[nlo].hi;
    Z.lo ^= Htable[nlo].lo;
  }

  Xi[0] = CRYPTO_bswap8(Z.hi);
  Xi[1] = CRYPTO_bswap8(Z.lo);
}

// Streamed variant of |gcm_gmult_4bit|; see CRYPTO_gcm128_[en|de]crypt for
// details. Compiler-generated code doesn't seem to give any performance
// improvement, at least not on x86[_64]. It's here mostly as a reference and
// a placeholder for possible future non-trivial optimizations.
static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16],
                           const uint8_t *inp, size_t len) {
  u128 Z;
  int cnt;
  size_t rem, nlo, nhi;

  do {
    cnt = 15;
    nlo = ((const uint8_t *)Xi)[15];
    nlo ^= inp[15];
    nhi = nlo >> 4;
    nlo &= 0xf;

    Z.hi = Htable[nlo].hi;
    Z.lo = Htable[nlo].lo;

    while (1) {
      rem = (size_t)Z.lo & 0xf;
      Z.lo = (Z.hi << 60) | (Z.lo >> 4);
      Z.hi = (Z.hi >> 4);
      if (sizeof(size_t) == 8) {
        Z.hi ^= rem_4bit[rem];
      } else {
        Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
      }

      Z.hi ^= Htable[nhi].hi;
      Z.lo ^= Htable[nhi].lo;

      if (--cnt < 0) {
        break;
      }

      nlo = ((const uint8_t *)Xi)[cnt];
      nlo ^= inp[cnt];
      nhi = nlo >> 4;
      nlo &= 0xf;

      rem = (size_t)Z.lo & 0xf;
      Z.lo = (Z.hi << 60) | (Z.lo >> 4);
      Z.hi = (Z.hi >> 4);
      if (sizeof(size_t) == 8) {
        Z.hi ^= rem_4bit[rem];
      } else {
        Z.hi ^= (uint64_t)rem_4bit[rem] << 32;
      }

      Z.hi ^= Htable[nlo].hi;
      Z.lo ^= Htable[nlo].lo;
    }

    Xi[0] = CRYPTO_bswap8(Z.hi);
    Xi[1] = CRYPTO_bswap8(Z.lo);
  } while (inp += 16, len -= 16);
}
#else  // GHASH_ASM
void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len);
#endif

#define GCM_MUL(ctx, Xi) gcm_gmult_4bit((ctx)->Xi.u, (ctx)->Htable)
#if defined(GHASH_ASM)
#define GHASH(ctx, in, len) gcm_ghash_4bit((ctx)->Xi.u, (ctx)->Htable, in, len)
// GHASH_CHUNK is a "stride parameter" meant to mitigate cache-trashing
// effects. The idea is to hash data while it is still in the L1 cache after
// the encryption pass.
#define GHASH_CHUNK (3 * 1024)
#endif
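// With the 3KiB stride above, each GHASH call in the bulk loops below covers
// 192 consecutive 16-byte blocks.
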
#if defined(GHASH_ASM)

#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
#define GCM_FUNCREF_4BIT
void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_clmul(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_clmul(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                     size_t len);

#if defined(OPENSSL_X86_64)
#define GHASH_ASM_X86_64
void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_avx(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *in,
                   size_t len);
#define AESNI_GCM
size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const void *key, uint8_t ivec[16], uint64_t *Xi);
size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len,
                         const void *key, uint8_t ivec[16], uint64_t *Xi);
#endif  // OPENSSL_X86_64

#if defined(OPENSSL_X86)
#define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_mmx(uint64_t Xi[2], const u128 Htable[16],
                        const uint8_t *inp, size_t len);
#endif  // OPENSSL_X86

#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
#include <openssl/arm_arch.h>
#if __ARM_ARCH__ >= 7
#define GHASH_ASM_ARM
#define GCM_FUNCREF_4BIT

static int pmull_capable(void) {
  return CRYPTO_is_ARMv8_PMULL_capable();
}

void gcm_init_v8(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                  size_t len);

#if defined(OPENSSL_ARM)
// 32-bit ARM also has support for doing GCM with NEON instructions.
static int neon_capable(void) {
  return CRYPTO_is_NEON_capable();
}

void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                    size_t len);
#else
// AArch64 only has the ARMv8 versions of functions.
static int neon_capable(void) {
  return 0;
}
static void gcm_init_neon(u128 Htable[16], const uint64_t Xi[2]) {
  abort();
}
static void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]) {
  abort();
}
static void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16],
                           const uint8_t *inp, size_t len) {
  abort();
}
#endif  // OPENSSL_ARM

#endif  // __ARM_ARCH__ >= 7

#elif defined(OPENSSL_PPC64LE)
#define GHASH_ASM_PPC64LE
#define GCM_FUNCREF_4BIT
void gcm_init_p8(u128 Htable[16], const uint64_t Xi[2]);
void gcm_gmult_p8(uint64_t Xi[2], const u128 Htable[16]);
void gcm_ghash_p8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                  size_t len);
#endif
#endif  // GHASH_ASM

#ifdef GCM_FUNCREF_4BIT
#undef GCM_MUL
#define GCM_MUL(ctx, Xi) (*gcm_gmult_p)((ctx)->Xi.u, (ctx)->Htable)
#ifdef GHASH
#undef GHASH
#define GHASH(ctx, in, len) (*gcm_ghash_p)((ctx)->Xi.u, (ctx)->Htable, in, len)
#endif  // GHASH
#endif  // GCM_FUNCREF_4BIT

void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash,
                       u128 *out_key, u128 out_table[16], int *out_is_avx,
                       const uint8_t *gcm_key) {
  *out_is_avx = 0;

  union {
    uint64_t u[2];
    uint8_t c[16];
  } H;

  OPENSSL_memcpy(H.c, gcm_key, 16);

  // H is stored in host byte order.
  H.u[0] = CRYPTO_bswap8(H.u[0]);
  H.u[1] = CRYPTO_bswap8(H.u[1]);

  OPENSSL_memcpy(out_key, H.c, 16);

#if defined(GHASH_ASM_X86_64)
  if (crypto_gcm_clmul_enabled()) {
    if (((OPENSSL_ia32cap_get()[1] >> 22) & 0x41) == 0x41) {  // AVX+MOVBE
      gcm_init_avx(out_table, H.u);
      *out_mult = gcm_gmult_avx;
      *out_hash = gcm_ghash_avx;
      *out_is_avx = 1;
      return;
    }
    gcm_init_clmul(out_table, H.u);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
#elif defined(GHASH_ASM_X86)
  if (crypto_gcm_clmul_enabled()) {
    gcm_init_clmul(out_table, H.u);
    *out_mult = gcm_gmult_clmul;
    *out_hash = gcm_ghash_clmul;
    return;
  }
#elif defined(GHASH_ASM_ARM)
  if (pmull_capable()) {
    gcm_init_v8(out_table, H.u);
    *out_mult = gcm_gmult_v8;
    *out_hash = gcm_ghash_v8;
    return;
  }

  if (neon_capable()) {
    gcm_init_neon(out_table, H.u);
    *out_mult = gcm_gmult_neon;
    *out_hash = gcm_ghash_neon;
    return;
  }
#elif defined(GHASH_ASM_PPC64LE)
  if (CRYPTO_is_PPC64LE_vcrypto_capable()) {
    gcm_init_p8(out_table, H.u);
    *out_mult = gcm_gmult_p8;
    *out_hash = gcm_ghash_p8;
    return;
  }
#endif

  gcm_init_4bit(out_table, H.u);
#if defined(GHASH_ASM_X86)
  *out_mult = gcm_gmult_4bit_mmx;
  *out_hash = gcm_ghash_4bit_mmx;
#else
  *out_mult = gcm_gmult_4bit;
  *out_hash = gcm_ghash_4bit;
#endif
}

void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *aes_key,
                        block128_f block, int is_aesni_encrypt) {
  OPENSSL_memset(ctx, 0, sizeof(*ctx));
  ctx->block = block;

  // The GHASH key |H| is the encryption of the all-zero block.
  uint8_t gcm_key[16];
  OPENSSL_memset(gcm_key, 0, sizeof(gcm_key));
  (*block)(gcm_key, gcm_key, aes_key);

  int is_avx;
  CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, &ctx->H, ctx->Htable, &is_avx,
                    gcm_key);

  ctx->use_aesni_gcm_crypt = (is_avx && is_aesni_encrypt) ? 1 : 0;
}

void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key,
                         const uint8_t *iv, size_t len) {
  uint32_t ctr;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#endif

  ctx->Yi.u[0] = 0;
  ctx->Yi.u[1] = 0;
  ctx->Xi.u[0] = 0;
  ctx->Xi.u[1] = 0;
  ctx->len.u[0] = 0;  // AAD length
  ctx->len.u[1] = 0;  // message length
  ctx->ares = 0;
  ctx->mres = 0;

  if (len == 12) {
    // A 96-bit IV is used directly as the initial counter block.
    OPENSSL_memcpy(ctx->Yi.c, iv, 12);
    ctx->Yi.c[15] = 1;
    ctr = 1;
  } else {
    // Otherwise, the initial counter block is GHASH of the IV.
    uint64_t len0 = len;

    while (len >= 16) {
      for (size_t i = 0; i < 16; ++i) {
        ctx->Yi.c[i] ^= iv[i];
      }
      GCM_MUL(ctx, Yi);
      iv += 16;
      len -= 16;
    }
    if (len) {
      for (size_t i = 0; i < len; ++i) {
        ctx->Yi.c[i] ^= iv[i];
      }
      GCM_MUL(ctx, Yi);
    }
    len0 <<= 3;
    ctx->Yi.u[1] ^= CRYPTO_bswap8(len0);

    GCM_MUL(ctx, Yi);
    ctr = CRYPTO_bswap4(ctx->Yi.d[3]);
  }

  (*ctx->block)(ctx->Yi.c, ctx->EK0.c, key);
  ++ctr;
  ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
}

int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) {
  unsigned int n;
  uint64_t alen = ctx->len.u[0];
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  if (ctx->len.u[1]) {
    return 0;
  }

  alen += len;
  if (alen > (UINT64_C(1) << 61) || (sizeof(len) == 8 && alen < len)) {
    return 0;
  }
  ctx->len.u[0] = alen;

  n = ctx->ares;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(aad++);
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->ares = n;
      return 1;
    }
  }

  // Process a whole number of blocks.
#ifdef GHASH
  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    GHASH(ctx, aad, len_blocks);
    aad += len_blocks;
    len -= len_blocks;
  }
#else
  while (len >= 16) {
    for (size_t i = 0; i < 16; ++i) {
      ctx->Xi.c[i] ^= aad[i];
    }
    GCM_MUL(ctx, Xi);
    aad += 16;
    len -= 16;
  }
#endif

  // Process the remainder.
  if (len != 0) {
    n = (unsigned int)len;
    for (size_t i = 0; i < len; ++i) {
      ctx->Xi.c[i] ^= aad[i];
    }
  }

  ctx->ares = n;
  return 1;
}

int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key,
                          const uint8_t *in, uint8_t *out, size_t len) {
  unsigned int n, ctr;
  uint64_t mlen = ctx->len.u[1];
  block128_f block = ctx->block;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    // First call to encrypt finalizes GHASH(AAD).
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  ctr = CRYPTO_bswap4(ctx->Yi.d[3]);

  n = ctx->mres;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
  if (STRICT_ALIGNMENT &&
      ((uintptr_t)in | (uintptr_t)out) % sizeof(size_t) != 0) {
    for (size_t i = 0; i < len; ++i) {
      if (n == 0) {
        (*block)(ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      }
      ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
      n = (n + 1) % 16;
      if (n == 0) {
        GCM_MUL(ctx, Xi);
      }
    }

    ctx->mres = n;
    return 1;
  }
#if defined(GHASH) && defined(GHASH_CHUNK)
  while (len >= GHASH_CHUNK) {
    size_t j = GHASH_CHUNK;

    while (j) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      for (size_t i = 0; i < 16; i += sizeof(size_t)) {
        store_word_le(out + i,
                      load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
      }
      out += 16;
      in += 16;
      j -= 16;
    }
    GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
    len -= GHASH_CHUNK;
  }
  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    while (len >= 16) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      for (size_t i = 0; i < 16; i += sizeof(size_t)) {
        store_word_le(out + i,
                      load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
      }
      out += 16;
      in += 16;
      len -= 16;
    }
    GHASH(ctx, out - len_blocks, len_blocks);
  }
#else
  while (len >= 16) {
    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    for (size_t i = 0; i < 16; i += sizeof(size_t)) {
      size_t tmp = load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)];
      store_word_le(out + i, tmp);
      ctx->Xi.t[i / sizeof(size_t)] ^= tmp;
    }
    GCM_MUL(ctx, Xi);
    out += 16;
    in += 16;
    len -= 16;
  }
#endif
  if (len) {
    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    while (len--) {
      ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key,
                          const unsigned char *in, unsigned char *out,
                          size_t len) {
  unsigned int n, ctr;
  uint64_t mlen = ctx->len.u[1];
  block128_f block = ctx->block;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    // First call to decrypt finalizes GHASH(AAD).
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  ctr = CRYPTO_bswap4(ctx->Yi.d[3]);

  n = ctx->mres;
  if (n) {
    while (n && len) {
      uint8_t c = *(in++);
      *(out++) = c ^ ctx->EKi.c[n];
      ctx->Xi.c[n] ^= c;
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }
  if (STRICT_ALIGNMENT &&
      ((uintptr_t)in | (uintptr_t)out) % sizeof(size_t) != 0) {
    for (size_t i = 0; i < len; ++i) {
      uint8_t c;
      if (n == 0) {
        (*block)(ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      }
      c = in[i];
      out[i] = c ^ ctx->EKi.c[n];
      ctx->Xi.c[n] ^= c;
      n = (n + 1) % 16;
      if (n == 0) {
        GCM_MUL(ctx, Xi);
      }
    }

    ctx->mres = n;
    return 1;
  }
#if defined(GHASH) && defined(GHASH_CHUNK)
  while (len >= GHASH_CHUNK) {
    size_t j = GHASH_CHUNK;

    GHASH(ctx, in, GHASH_CHUNK);
    while (j) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      for (size_t i = 0; i < 16; i += sizeof(size_t)) {
        store_word_le(out + i,
                      load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
      }
      out += 16;
      in += 16;
      j -= 16;
    }
    len -= GHASH_CHUNK;
  }
  size_t len_blocks = len & kSizeTWithoutLower4Bits;
  if (len_blocks != 0) {
    GHASH(ctx, in, len_blocks);
    while (len >= 16) {
      (*block)(ctx->Yi.c, ctx->EKi.c, key);
      ++ctr;
      ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
      for (size_t i = 0; i < 16; i += sizeof(size_t)) {
        store_word_le(out + i,
                      load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
      }
      out += 16;
      in += 16;
      len -= 16;
    }
  }
#else
  while (len >= 16) {
    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    for (size_t i = 0; i < 16; i += sizeof(size_t)) {
      size_t c = load_word_le(in + i);
      store_word_le(out + i, c ^ ctx->EKi.t[i / sizeof(size_t)]);
      ctx->Xi.t[i / sizeof(size_t)] ^= c;
    }
    GCM_MUL(ctx, Xi);
    out += 16;
    in += 16;
    len -= 16;
  }
#endif
  if (len) {
    (*block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    while (len--) {
      uint8_t c = in[n];
      ctx->Xi.c[n] ^= c;
      out[n] = c ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
                                const uint8_t *in, uint8_t *out, size_t len,
                                ctr128_f stream) {
  unsigned int n, ctr;
  uint64_t mlen = ctx->len.u[1];
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    // First call to encrypt finalizes GHASH(AAD).
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  n = ctx->mres;
  if (n) {
    while (n && len) {
      ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }

#if defined(AESNI_GCM)
  if (ctx->use_aesni_gcm_crypt) {
    // |aesni_gcm_encrypt| may not process all the input given to it. It may
    // not process *any* of its input if it is deemed too small.
    size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
    in += bulk;
    out += bulk;
    len -= bulk;
  }
#endif

  ctr = CRYPTO_bswap4(ctx->Yi.d[3]);

#if defined(GHASH)
  while (len >= GHASH_CHUNK) {
    (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
    ctr += GHASH_CHUNK / 16;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    GHASH(ctx, out, GHASH_CHUNK);
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }
#endif
  size_t i = len & kSizeTWithoutLower4Bits;
  if (i != 0) {
    size_t j = i / 16;

    (*stream)(in, out, j, key, ctx->Yi.c);
    ctr += (unsigned int)j;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    in += i;
    len -= i;
#if defined(GHASH)
    GHASH(ctx, out, i);
    out += i;
#else
    while (j--) {
      for (i = 0; i < 16; ++i) {
        ctx->Xi.c[i] ^= out[i];
      }
      GCM_MUL(ctx, Xi);
      out += 16;
    }
#endif
  }
  if (len) {
    (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    while (len--) {
      ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key,
                                const uint8_t *in, uint8_t *out, size_t len,
                                ctr128_f stream) {
  unsigned int n, ctr;
  uint64_t mlen = ctx->len.u[1];
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#ifdef GHASH
  void (*gcm_ghash_p)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp,
                      size_t len) = ctx->ghash;
#endif
#endif

  mlen += len;
  if (mlen > ((UINT64_C(1) << 36) - 32) ||
      (sizeof(len) == 8 && mlen < len)) {
    return 0;
  }
  ctx->len.u[1] = mlen;

  if (ctx->ares) {
    // First call to decrypt finalizes GHASH(AAD).
    GCM_MUL(ctx, Xi);
    ctx->ares = 0;
  }

  n = ctx->mres;
  if (n) {
    while (n && len) {
      uint8_t c = *(in++);
      *(out++) = c ^ ctx->EKi.c[n];
      ctx->Xi.c[n] ^= c;
      --len;
      n = (n + 1) % 16;
    }
    if (n == 0) {
      GCM_MUL(ctx, Xi);
    } else {
      ctx->mres = n;
      return 1;
    }
  }

#if defined(AESNI_GCM)
  if (ctx->use_aesni_gcm_crypt) {
    // |aesni_gcm_decrypt| may not process all the input given to it. It may
    // not process *any* of its input if it is deemed too small.
    size_t bulk = aesni_gcm_decrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u);
    in += bulk;
    out += bulk;
    len -= bulk;
  }
#endif

  ctr = CRYPTO_bswap4(ctx->Yi.d[3]);

#if defined(GHASH)
  while (len >= GHASH_CHUNK) {
    GHASH(ctx, in, GHASH_CHUNK);
    (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
    ctr += GHASH_CHUNK / 16;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    out += GHASH_CHUNK;
    in += GHASH_CHUNK;
    len -= GHASH_CHUNK;
  }
#endif
  size_t i = len & kSizeTWithoutLower4Bits;
  if (i != 0) {
    size_t j = i / 16;

#if defined(GHASH)
    GHASH(ctx, in, i);
#else
    while (j--) {
      size_t k;
      for (k = 0; k < 16; ++k) {
        ctx->Xi.c[k] ^= in[k];
      }
      GCM_MUL(ctx, Xi);
      in += 16;
    }
    j = i / 16;
    in -= i;
#endif
    (*stream)(in, out, j, key, ctx->Yi.c);
    ctr += (unsigned int)j;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    out += i;
    in += i;
    len -= i;
  }
  if (len) {
    (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key);
    ++ctr;
    ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
    while (len--) {
      uint8_t c = in[n];
      ctx->Xi.c[n] ^= c;
      out[n] = c ^ ctx->EKi.c[n];
      ++n;
    }
  }

  ctx->mres = n;
  return 1;
}

int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len) {
  uint64_t alen = ctx->len.u[0] << 3;
  uint64_t clen = ctx->len.u[1] << 3;
#ifdef GCM_FUNCREF_4BIT
  void (*gcm_gmult_p)(uint64_t Xi[2], const u128 Htable[16]) = ctx->gmult;
#endif

  if (ctx->mres || ctx->ares) {
    GCM_MUL(ctx, Xi);
  }

  alen = CRYPTO_bswap8(alen);
  clen = CRYPTO_bswap8(clen);

  ctx->Xi.u[0] ^= alen;
  ctx->Xi.u[1] ^= clen;
  GCM_MUL(ctx, Xi);

  ctx->Xi.u[0] ^= ctx->EK0.u[0];
  ctx->Xi.u[1] ^= ctx->EK0.u[1];

  if (tag && len <= sizeof(ctx->Xi)) {
    return CRYPTO_memcmp(ctx->Xi.c, tag, len) == 0;
  } else {
    return 0;
  }
}

void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) {
  CRYPTO_gcm128_finish(ctx, NULL, 0);
  OPENSSL_memcpy(tag, ctx->Xi.c,
                 len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}
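
// A minimal sketch of the intended call sequence for sealing one message with
// the functions above. |aes_key|, |aes_encrypt_block|, |iv|, |aad|,
// |plaintext| and the length variables are hypothetical caller-supplied
// values, not part of this file:
//
//   GCM128_CONTEXT ctx;
//   uint8_t tag[16];
//   CRYPTO_gcm128_init(&ctx, aes_key, aes_encrypt_block, 0);
//   CRYPTO_gcm128_setiv(&ctx, aes_key, iv, 12);
//   CRYPTO_gcm128_aad(&ctx, aad, aad_len);
//   CRYPTO_gcm128_encrypt(&ctx, aes_key, plaintext, ciphertext, plaintext_len);
//   CRYPTO_gcm128_tag(&ctx, tag, sizeof(tag));
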
#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64)
int crypto_gcm_clmul_enabled(void) {
#ifdef GHASH_ASM
  const uint32_t *ia32cap = OPENSSL_ia32cap_get();
  return (ia32cap[0] & (1 << 24)) &&  // check FXSR bit
         (ia32cap[1] & (1 << 1));     // check PCLMULQDQ bit
#else
  return 0;
#endif
}
#endif