/* This file was automatically imported with
import_gcry.py. Please don't modify it */
/* Rijndael (AES) for GnuPG
* Copyright (C) 2000, 2001, 2002, 2003, 2007,
* 2008, 2011, 2012 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef G10_RIJNDAEL_INTERNAL_H
#define G10_RIJNDAEL_INTERNAL_H
#include "types.h" /* for byte and u32 typedefs */
#define MAXKC (256/32)
#define MAXROUNDS 14
#define BLOCKSIZE (128/8)
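/* Illustrative note (not from the original header): for AES the round
 * count follows from the key length in 32-bit words (Nk = keybits/32,
 * rounds = Nk + 6), so the constants above bound the largest case:
 *
 *   AES-128: Nk = 4, 10 rounds
 *   AES-192: Nk = 6, 12 rounds
 *   AES-256: Nk = 8, 14 rounds   ->  MAXKC = 256/32 = 8, MAXROUNDS = 14
 *
 * BLOCKSIZE is always 128/8 = 16 bytes, independent of the key length. */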
/* Helper macro to force alignment to 16 or 64 bytes. */
#ifdef HAVE_GCC_ATTRIBUTE_ALIGNED
# define ATTR_ALIGNED_16 __attribute__ ((aligned (16)))
# define ATTR_ALIGNED_64 __attribute__ ((aligned (64)))
#else
# define ATTR_ALIGNED_16
# define ATTR_ALIGNED_64
#endif
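/* Illustrative usage sketch (hypothetical identifier, not from the
 * original header): the alignment macros are appended to an object
 * definition, e.g.
 *
 *   static const u32 example_table[256] ATTR_ALIGNED_64 = { 0 };
 *
 * When the compiler lacks the aligned attribute they expand to nothing
 * and the object simply gets the default alignment. */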
/* USE_AMD64_ASM indicates whether to use AMD64 assembly code. */
#undef USE_AMD64_ASM
#if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_AMD64_ASM 1
#endif
/* USE_SSSE3 indicates whether to use SSSE3 code. */
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
(defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_SSSE3 1
#endif
/* USE_ARM_ASM indicates whether to use ARM assembly code. */
#undef USE_ARM_ASM
#if defined(__ARMEL__)
# ifdef HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS
# define USE_ARM_ASM 1
# endif
#endif
#if defined(__AARCH64EL__)
# ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS
# define USE_ARM_ASM 1
# endif
#endif
/* USE_PADLOCK indicates whether to compile the padlock specific
code. */
#undef USE_PADLOCK
#ifdef ENABLE_PADLOCK_SUPPORT
# ifdef HAVE_GCC_ATTRIBUTE_ALIGNED
# if (defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4) || defined(__x86_64__)
# define USE_PADLOCK 1
# endif
# endif
#endif /* ENABLE_PADLOCK_SUPPORT */
/* USE_AESNI indicates whether to compile with Intel AES-NI code. We
need the vector-size attribute which seems to be available since
gcc 3. However, to be on the safe side we require at least gcc 4. */
#undef USE_AESNI
#ifdef ENABLE_AESNI_SUPPORT
# if ((defined (__i386__) && SIZEOF_UNSIGNED_LONG == 4) || defined(__x86_64__))
# if __GNUC__ >= 4
# define USE_AESNI 1
# endif
# endif
#endif /* ENABLE_AESNI_SUPPORT */
/* USE_VAES indicates whether to compile with AMD64 VAES code. */
#undef USE_VAES
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
defined(__x86_64__) && defined(ENABLE_AVX2_SUPPORT) && \
defined(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL) && \
defined(USE_AESNI)
# define USE_VAES 1
#endif
/* USE_VAES_I386 indicates whether to compile with i386 VAES code. */
#undef USE_VAES_I386
#if (defined(HAVE_COMPATIBLE_GCC_I386_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN32_PLATFORM_AS)) && \
defined(__i386__) && defined(ENABLE_AVX2_SUPPORT) && \
defined(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL) && \
defined(USE_AESNI)
# define USE_VAES_I386 1
#endif
/* USE_ARM_CE indicates whether to enable ARMv8 Crypto Extension assembly
* code. */
#undef USE_ARM_CE
#ifdef ENABLE_ARM_CRYPTO_SUPPORT
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
&& defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
&& defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO)
# define USE_ARM_CE 1
# elif defined(__AARCH64EL__) \
&& defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) \
&& defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO)
# define USE_ARM_CE 1
# endif
#endif /* ENABLE_ARM_CRYPTO_SUPPORT */
/* USE_PPC_CRYPTO indicates whether to enable PowerPC vector crypto
* accelerated code. USE_PPC_CRYPTO_WITH_PPC9LE indicates whether to
* enable POWER9 optimized variant. */
#undef USE_PPC_CRYPTO
#undef USE_PPC_CRYPTO_WITH_PPC9LE
#ifdef ENABLE_PPC_CRYPTO_SUPPORT
# if defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC)
# if __GNUC__ >= 4
# define USE_PPC_CRYPTO 1
# if !defined(WORDS_BIGENDIAN) && defined(HAVE_GCC_INLINE_ASM_PPC_ARCH_3_00)
# define USE_PPC_CRYPTO_WITH_PPC9LE 1
# endif
# endif
# endif
#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
/* USE_S390X_CRYPTO indicates whether to enable zSeries code. */
#undef USE_S390X_CRYPTO
#if defined(HAVE_GCC_INLINE_ASM_S390X)
# define USE_S390X_CRYPTO 1
#endif /* USE_S390X_CRYPTO */
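/* Illustrative sketch (hypothetical names, not from the original header):
 * the USE_* macros above only express compile-time capability.  The
 * implementation file is expected to additionally check the CPU at runtime
 * before wiring up an accelerated variant, roughly like
 *
 *   #ifdef USE_AESNI
 *     if (cpu_supports_aesni ())        hypothetical runtime check
 *       ctx->encrypt_fn = aesni_encrypt_fn;
 *   #endif
 */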
struct RIJNDAEL_context_s;
typedef unsigned int (*rijndael_cryptfn_t)(const struct RIJNDAEL_context_s *ctx,
unsigned char *bx,
const unsigned char *ax);
typedef void (*rijndael_prefetchfn_t)(void);
typedef void (*rijndael_prepare_decfn_t)(struct RIJNDAEL_context_s *ctx);
/* Our context object. */
typedef struct RIJNDAEL_context_s
{
/* The first fields are the keyschedule arrays. This is so that
they are aligned on a 16 byte boundary if using gcc. This
alignment is required for the AES-NI code and a good idea in any
case. The alignment is guaranteed due to the way cipher.c
allocates the space for the context. The PROPERLY_ALIGNED_TYPE
hack is used to force a minimal alignment if not using gcc or if
the alignment requirement is higher than 16 bytes. */
union
{
PROPERLY_ALIGNED_TYPE dummy;
byte keyschedule[MAXROUNDS+1][4][4];
u32 keyschedule32[MAXROUNDS+1][4];
u32 keyschedule32b[(MAXROUNDS+1)*4];
#ifdef USE_PADLOCK
/* The key as passed to the padlock engine. It is only used if
the padlock engine is used (USE_PADLOCK, above). */
unsigned char padlock_key[16] __attribute__ ((aligned (16)));
#endif /*USE_PADLOCK*/
} u1;
union
{
PROPERLY_ALIGNED_TYPE dummy;
byte keyschedule[MAXROUNDS+1][4][4];
u32 keyschedule32[MAXROUNDS+1][4];
} u2;
int rounds; /* Key-length-dependent number of rounds. */
unsigned int decryption_prepared:1; /* The decryption key schedule is available. */
#ifdef USE_AESNI
unsigned int use_avx:1; /* AVX shall be used by AES-NI implementation. */
unsigned int use_avx2:1; /* AVX2 shall be used by AES-NI implementation. */
#endif /*USE_AESNI*/
#ifdef USE_S390X_CRYPTO
byte km_func;
byte km_func_xts;
byte kmc_func;
byte kmac_func;
byte kmf_func;
byte kmo_func;
byte kma_func;
#endif /*USE_S390X_CRYPTO*/
rijndael_cryptfn_t encrypt_fn;
rijndael_cryptfn_t decrypt_fn;
rijndael_prefetchfn_t prefetch_enc_fn;
rijndael_prefetchfn_t prefetch_dec_fn;
rijndael_prepare_decfn_t prepare_decryption;
} RIJNDAEL_context ATTR_ALIGNED_16;
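/* Illustrative usage sketch (hypothetical variable names, not from the
 * original header): once key setup has filled in the context, callers go
 * through the function pointers so the selected implementation stays
 * transparent to the mode code, e.g.
 *
 *   unsigned int burn;
 *   if (ctx->prefetch_enc_fn)
 *     ctx->prefetch_enc_fn ();
 *   burn = ctx->encrypt_fn (ctx, outbuf, inbuf);    bx = output, ax = input
 *
 * The unsigned int return value reports how many stack bytes the primitive
 * touched so the caller can wipe them afterwards. */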
/* Macros defining aliases for the key schedules. */
#define keyschenc u1.keyschedule
#define keyschenc32 u1.keyschedule32
#define keyschenc32b u1.keyschedule32b
#define keyschdec u2.keyschedule
#define keyschdec32 u2.keyschedule32
#define padlockkey u1.padlock_key
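/* Illustrative note (not from the original header): the aliases let the
 * implementation pick whichever view of the key schedule is convenient
 * while always naming the same storage; with a context pointer CTX,
 *
 *   CTX->keyschenc32[r][c]    expands to   CTX->u1.keyschedule32[r][c]
 *   CTX->keyschdec[r][c][b]   expands to   CTX->u2.keyschedule[r][c][b]
 */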
#endif /* G10_RIJNDAEL_INTERNAL_H */