1212#if defined __GNUC__
1313#pragma GCC diagnostic push
1414#pragma GCC diagnostic ignored "-Wdeclaration-after-statement"
15+ #pragma GCC diagnostic ignored "-Wuninitialized"
1516#pragma GCC diagnostic ignored "-Wunused-function"
1617#include <emmintrin.h> /* SSE2 _mm_load_si128 _mm_loadu_si128 _mm_store_si128 _mm_set_epi64x _mm_add_epi32 _mm_shuffle_epi32 */
1718#include <tmmintrin.h> /* SSSE3 _mm_alignr_epi8 _mm_shuffle_epi8 */
@@ -93,13 +94,13 @@ static int ltc_attribute_sha256 s_sha256_x86_compress(hash_state * md, const uns
9394
9495 LTC_ARGCHK (md != NULL );
9596 LTC_ARGCHK (buf != NULL );
96- LTC_ARGCHK (((uintptr_t )(& md -> sha256_x86 .state [0 ])) % 16 == 0 );
97+ LTC_ARGCHK (((uintptr_t )(& md -> sha256 .state [0 ])) % 16 == 0 );
9798 LTC_ARGCHK (((uintptr_t )(& K [0 ])) % 16 == 0 );
9899 LTC_ARGCHK (sizeof (int ) == 4 );
99100
100101 reverse = _mm_set_epi64x (0x0c0d0e0f08090a0bull , 0x0405060700010203ull );
101- state_0 = _mm_load_si128 (((__m128i const * )(& md -> sha256_x86 .state [0 ])));
102- state_1 = _mm_load_si128 (((__m128i const * )(& md -> sha256_x86 .state [4 ])));
102+ state_0 = _mm_load_si128 (((__m128i const * )(& md -> sha256 .state [0 ])));
103+ state_1 = _mm_load_si128 (((__m128i const * )(& md -> sha256 .state [4 ])));
103104 tmp = _mm_shuffle_epi32 (state_0 , k_shuffle_epi32 (0x2 , 0x3 , 0x0 , 0x1 ));
104105 state_1 = _mm_shuffle_epi32 (state_1 , k_shuffle_epi32 (0x0 , 0x1 , 0x2 , 0x3 ));
105106 state_0 = _mm_alignr_epi8 (tmp , state_1 , k_alignr_epi8 (2 ));
@@ -250,8 +251,8 @@ static int ltc_attribute_sha256 s_sha256_x86_compress(hash_state * md, const uns
250251 state_1 = _mm_shuffle_epi32 (state_1 , k_shuffle_epi32 (0x2 , 0x3 , 0x0 , 0x1 ));
251252 state_0 = ltc_mm_blend_epi32 (tmp , state_1 , k_blend_epi32 (0x1 , 0x1 , 0x0 , 0x0 ));
252253 state_1 = _mm_alignr_epi8 (state_1 , tmp , k_alignr_epi8 (2 ));
253- _mm_store_si128 (((__m128i * )(& md -> sha256_x86 .state [0 ])), state_0 );
254- _mm_store_si128 (((__m128i * )(& md -> sha256_x86 .state [4 ])), state_1 );
254+ _mm_store_si128 (((__m128i * )(& md -> sha256 .state [0 ])), state_0 );
255+ _mm_store_si128 (((__m128i * )(& md -> sha256 .state [4 ])), state_1 );
255256 return CRYPT_OK ;
256257}
257258#undef K
@@ -275,16 +276,18 @@ int sha256_x86_init(hash_state * md)
275276{
276277 LTC_ARGCHK (md != NULL );
277278
278- md -> sha256_x86 .curlen = 0 ;
279- md -> sha256_x86 .length = 0 ;
280- md -> sha256_x86 .state [0 ] = 0x6A09E667UL ;
281- md -> sha256_x86 .state [1 ] = 0xBB67AE85UL ;
282- md -> sha256_x86 .state [2 ] = 0x3C6EF372UL ;
283- md -> sha256_x86 .state [3 ] = 0xA54FF53AUL ;
284- md -> sha256_x86 .state [4 ] = 0x510E527FUL ;
285- md -> sha256_x86 .state [5 ] = 0x9B05688CUL ;
286- md -> sha256_x86 .state [6 ] = 0x1F83D9ABUL ;
287- md -> sha256_x86 .state [7 ] = 0x5BE0CD19UL ;
279+ md -> sha256 .state = LTC_ALIGN_BUF (md -> sha256 .state_buf , 16 );
280+
281+ md -> sha256 .curlen = 0 ;
282+ md -> sha256 .length = 0 ;
283+ md -> sha256 .state [0 ] = 0x6A09E667UL ;
284+ md -> sha256 .state [1 ] = 0xBB67AE85UL ;
285+ md -> sha256 .state [2 ] = 0x3C6EF372UL ;
286+ md -> sha256 .state [3 ] = 0xA54FF53AUL ;
287+ md -> sha256 .state [4 ] = 0x510E527FUL ;
288+ md -> sha256 .state [5 ] = 0x9B05688CUL ;
289+ md -> sha256 .state [6 ] = 0x1F83D9ABUL ;
290+ md -> sha256 .state [7 ] = 0x5BE0CD19UL ;
288291 return CRYPT_OK ;
289292}
290293
/**
   Process a block of memory through the hash
   @param md     The hash state
   @param in     The data to hash
   @param inlen  The length of the data (octets)
   @return CRYPT_OK if successful
*/
/* NOTE(review): HASH_PROCESS expands to sha256_x86_process(), presumably
 * buffering partial input in md->sha256.buf and calling
 * s_sha256_x86_compress() for each full 64-byte block -- confirm against
 * the HASH_PROCESS macro definition in tomcrypt_hash.h. The third
 * argument is the hash_state union member name (sha256). */
HASH_PROCESS(sha256_x86_process, s_sha256_x86_compress, sha256, 64)
299302
300303/**
301304 Terminate the hash to get the digest
@@ -310,41 +313,41 @@ int sha256_x86_done(hash_state * md, unsigned char *out)
310313 LTC_ARGCHK (md != NULL );
311314 LTC_ARGCHK (out != NULL );
312315
313- if (md -> sha256_x86 .curlen >= sizeof (md -> sha256_x86 .buf )) {
316+ if (md -> sha256 .curlen >= sizeof (md -> sha256 .buf )) {
314317 return CRYPT_INVALID_ARG ;
315318 }
316319
317320
318321 /* increase the length of the message */
319- md -> sha256_x86 .length += md -> sha256_x86 .curlen * 8 ;
322+ md -> sha256 .length += md -> sha256 .curlen * 8 ;
320323
321324 /* append the '1' bit */
322- md -> sha256_x86 .buf [md -> sha256_x86 .curlen ++ ] = (unsigned char )0x80 ;
325+ md -> sha256 .buf [md -> sha256 .curlen ++ ] = (unsigned char )0x80 ;
323326
324327 /* if the length is currently above 56 bytes we append zeros
325328 * then compress. Then we can fall back to padding zeros and length
326329 * encoding like normal.
327330 */
328- if (md -> sha256_x86 .curlen > 56 ) {
329- while (md -> sha256_x86 .curlen < 64 ) {
330- md -> sha256_x86 .buf [md -> sha256_x86 .curlen ++ ] = (unsigned char )0 ;
331+ if (md -> sha256 .curlen > 56 ) {
332+ while (md -> sha256 .curlen < 64 ) {
333+ md -> sha256 .buf [md -> sha256 .curlen ++ ] = (unsigned char )0 ;
331334 }
332- s_sha256_x86_compress (md , md -> sha256_x86 .buf );
333- md -> sha256_x86 .curlen = 0 ;
335+ s_sha256_x86_compress (md , md -> sha256 .buf );
336+ md -> sha256 .curlen = 0 ;
334337 }
335338
336339 /* pad upto 56 bytes of zeroes */
337- while (md -> sha256_x86 .curlen < 56 ) {
338- md -> sha256_x86 .buf [md -> sha256_x86 .curlen ++ ] = (unsigned char )0 ;
340+ while (md -> sha256 .curlen < 56 ) {
341+ md -> sha256 .buf [md -> sha256 .curlen ++ ] = (unsigned char )0 ;
339342 }
340343
341344 /* store length */
342- STORE64H (md -> sha256_x86 .length , md -> sha256_x86 .buf + 56 );
343- s_sha256_x86_compress (md , md -> sha256_x86 .buf );
345+ STORE64H (md -> sha256 .length , md -> sha256 .buf + 56 );
346+ s_sha256_x86_compress (md , md -> sha256 .buf );
344347
345348 /* copy output */
346349 for (i = 0 ; i < 8 ; i ++ ) {
347- STORE32H (md -> sha256_x86 .state [i ], out + (4 * i ));
350+ STORE32H (md -> sha256 .state [i ], out + (4 * i ));
348351 }
349352#ifdef LTC_CLEAN_STACK
350353 zeromem (md , sizeof (hash_state ));
0 commit comments