/***********************************************************************
 * Copyright (c) 2015 Andrew Poelstra                                  *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

#include "checkmem.h"
#include "scalar.h"
#include "util.h"

#include <string.h>
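
/* Scalar implementation used in exhaustive test mode: a scalar is the single
 * uint32_t that secp256k1_scalar is typedef'd to for this build, holding a
 * value in [0, EXHAUSTIVE_TEST_ORDER), and every operation below reduces
 * modulo that small group order. */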

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return !(*a & 1);
}

SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; }

SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
    *r = v % EXHAUSTIVE_TEST_ORDER;

    SECP256K1_SCALAR_VERIFY(r);
}

SECP256K1_INLINE static uint32_t secp256k1_scalar_get_bits_limb32(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
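    /* Extract `count` bits (1..32) starting at bit `offset`. Since the whole
     * scalar fits in one 32-bit limb here, any offset past bit 31 reads zeros. */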
    SECP256K1_SCALAR_VERIFY(a);

    VERIFY_CHECK(count > 0 && count <= 32);
    if (offset < 32) {
        return (*a >> offset) & (0xFFFFFFFF >> (32 - count));
    } else {
        return 0;
    }
}

SECP256K1_INLINE static uint32_t secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    SECP256K1_SCALAR_VERIFY(a);

    return secp256k1_scalar_get_bits_limb32(a, offset, count);
}

SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
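    /* Returns 1 if the sum wrapped modulo the group order, 0 otherwise: for
     * in-range inputs the reduced result is smaller than *b exactly when a
     * reduction happened. */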
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;

    SECP256K1_SCALAR_VERIFY(r);
    return *r < *b;
}

static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
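    /* Conditionally add 2^bit to *r; the caller must ensure the result stays
     * below the group order. The `bit < 32` guard keeps the shift well defined
     * even in non-VERIFY builds; the checks below assert the preconditions. */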
    SECP256K1_SCALAR_VERIFY(r);

    if (flag && bit < 32)
        *r += ((uint32_t)1 << bit);

    SECP256K1_SCALAR_VERIFY(r);
    VERIFY_CHECK(bit < 32);
    /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
    VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
}

static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
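    /* Parse 32 big-endian bytes, reducing modulo the group order as each byte
     * is absorbed; *overflow (if non-NULL) records whether any reduction
     * occurred. */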
    int i;
    int over = 0;
    *r = 0;
    for (i = 0; i < 32; i++) {
        *r = (*r * 0x100) + b32[i];
        if (*r >= EXHAUSTIVE_TEST_ORDER) {
            over = 1;
            *r %= EXHAUSTIVE_TEST_ORDER;
        }
    }
    if (overflow) *overflow = over;

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
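    /* Serialize as 32 big-endian bytes: zero padding followed by the scalar
     * value in the last four bytes. */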
    SECP256K1_SCALAR_VERIFY(a);

    memset(bin, 0, 32);
    bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return *a == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    if (*a == 0) {
        *r = 0;
    } else {
        *r = EXHAUSTIVE_TEST_ORDER - *a;
    }

    SECP256K1_SCALAR_VERIFY(r);
}

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    SECP256K1_SCALAR_VERIFY(a);

    return *a == 1;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
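    /* A scalar is "high" when it exceeds half the group order, i.e. when its
     * negation would be numerically smaller. */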
    SECP256K1_SCALAR_VERIFY(a);

    return *a > EXHAUSTIVE_TEST_ORDER / 2;
}

static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
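    /* Negate *r if flag is set; returns -1 when negated and 1 otherwise. */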
    SECP256K1_SCALAR_VERIFY(r);

    if (flag) secp256k1_scalar_negate(r, r);

    SECP256K1_SCALAR_VERIFY(r);
    return flag ? -1 : 1;
}

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
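    /* Split into low and high 128-bit halves. The whole scalar fits in 32 bits
     * here, so the low half is the scalar itself and the high half is always
     * zero. */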
    SECP256K1_SCALAR_VERIFY(a);

    *r1 = *a;
    *r2 = 0;

    SECP256K1_SCALAR_VERIFY(r1);
    SECP256K1_SCALAR_VERIFY(r2);
}

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    return *a == *b;
}

static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
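    /* Constant-time conditional move: build an all-zeros or all-ones mask from
     * flag and blend without branching. Reading flag through a volatile lvalue
     * presumably discourages the compiler from reintroducing a branch. */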
    uint32_t mask0, mask1;
    volatile int vflag = flag;
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_CHECKMEM_CHECK_VERIFY(r, sizeof(*r));

    mask0 = vflag + ~((uint32_t)0);
    mask1 = ~mask0;
    *r = (*r & mask0) | (*a & mask1);

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
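    /* Brute-force search for the modular inverse; affordable because the
     * exhaustive-test group order is tiny. */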
    int i;
    uint32_t res = 0;
    SECP256K1_SCALAR_VERIFY(x);

    for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
        if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) {
            res = i;
            break;
        }
    }

    /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
     * have a composite group order; fix it in exhaustive_tests.c). */
    VERIFY_CHECK(res != 0);
    *r = res;

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
    SECP256K1_SCALAR_VERIFY(x);

    secp256k1_scalar_inverse(r, x);

    SECP256K1_SCALAR_VERIFY(r);
}

static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
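    /* Halve modulo the group order: if *a is odd, first add the (odd) order so
     * the sum is even, then shift right by one. */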
    SECP256K1_SCALAR_VERIFY(a);

    *r = (*a + ((-(uint32_t)(*a & 1)) & EXHAUSTIVE_TEST_ORDER)) >> 1;

    SECP256K1_SCALAR_VERIFY(r);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */