add amd64-{51-30k,64-24k} from SUPERCOP, now use autoconf

This commit is contained in:
cathugger 2017-09-27 02:35:56 +03:00
parent ad9c6592ed
commit 81cebae449
162 changed files with 79451 additions and 277 deletions

262
Makefile
View file

@ -1,262 +0,0 @@
# Hand-written build for mkp224o (pre-autoconf version).
# Objects are named *.c.o rather than *.o so source and object names stay 1:1.
CC= gcc
# Language/feature-test flags, kept separate from CFLAGS so the `depend`
# target can reuse them for makedepend.
CSTD= -std=c99 -Wall -D_POSIX_C_SOURCE=200112L
CFLAGS= $(CSTD) -O3 -march=native
# Debug/ASan alternative; swap the assignments to use it.
#CFLAGS= $(CSTD) -O0 -g3 -fsanitize=address
MV= mv
# All ed25519 ref10 objects, discovered via wildcard.
# NOTE(review): recursive "=" re-expands wildcard/patsubst on every use;
# ":=" would be cheaper, though the result here is the same.
ED25519OBJ= $(patsubst %.c,%.c.o,$(wildcard ed25519/ref10/*.c))
# Objects for the main mkp224o binary.
MAINOBJ= \
main.c.o \
base32_to.c.o \
base32_from.c.o \
$(ED25519OBJ) \
keccak.c.o
# Objects for each standalone test program.
TEST_BASE32OBJ= \
test_base32.c.o \
base32_to.c.o \
base32_from.c.o
TEST_BASE16OBJ= \
test_base16.c.o \
base16_to.c.o \
base16_from.c.o
TEST_ED25519OBJ= \
test_ed25519.c.o \
base16_to.c.o \
base16_from.c.o \
$(ED25519OBJ)
# Link libraries: libsodium (crypto) and pthreads for the main binary.
MAINLIB= -lsodium -lpthread
TEST_ED25519LIB= -lsodium
# Every executable this Makefile can produce.
EXE= mkp224o test_base32 test_base16 test_ed25519
# None of these targets name real files; declare them phony so a stray file
# called e.g. "clean" cannot silently disable the target.
.PHONY: default all clean depend

# `default` builds only the main binary; `all` additionally builds the tests.
default: mkp224o

all: $(EXE)

# Link rules: link into $@.tmp and rename on success, so an interrupted or
# failed link never leaves a truncated-but-newer binary looking up to date.
mkp224o: $(MAINOBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ $(MAINLIB) && $(MV) $@.tmp $@

test_base32: $(TEST_BASE32OBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ && $(MV) $@.tmp $@

test_base16: $(TEST_BASE16OBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ && $(MV) $@.tmp $@

test_ed25519: $(TEST_ED25519OBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ $(TEST_ED25519LIB) && $(MV) $@.tmp $@

# Compile rule; same atomic temp-file trick as the link rules.
%.c.o: %.c
	$(CC) $(CFLAGS) -c -o $@.tmp $< && $(MV) $@.tmp $@

clean:
	$(RM) $(MAINOBJ)
	$(RM) $(TEST_BASE16OBJ)
	$(RM) $(TEST_BASE32OBJ)
	$(RM) $(TEST_ED25519OBJ)
	$(RM) $(EXE)

# Regenerate the dependency list below the "DO NOT DELETE" marker.
# BUGFIX: pass -o.c.o so makedepend emits targets with the .c.o suffix this
# Makefile actually uses; without it the generated "foo.o: ..." lines match
# no target and all header dependencies are silently ignored.  Rerunning
# `make depend` also replaces the stale, duplicated list currently below.
depend:
	makedepend -Y -o.c.o -- $(CSTD) -- $(MAINOBJ:.c.o=.c) $(TEST_BASE16OBJ:.c.o=.c) $(TEST_BASE32OBJ:.c.o=.c) $(TEST_ED25519OBJ:.c.o=.c)
# DO NOT DELETE THIS LINE
main.o: ed25519/ref10/ed25519_ref10.h ed25519/ref10/ge.h ed25519/ref10/fe.h
main.o: ed25519/ref10/crypto_int32.h types.h vec.h base32.h keccak.h
base32_to.o: types.h base32.h
base32_from.o: types.h base32.h
ed25519/ref10/sc_reduce.o: ed25519/ref10/sc.h ed25519/ref10/crypto_int64.h
ed25519/ref10/sc_reduce.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/sc_reduce.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/ge_msub.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_msub.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_msub.h
ed25519/ref10/fe_copy.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnegative.o: ed25519/ref10/fe.h
ed25519/ref10/fe_isnegative.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_tobytes.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_tobytes.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_tobytes.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_0.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_0.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_0.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_double_scalarmult.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_double_scalarmult.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_double_scalarmult.o: ed25519/ref10/base2.h
ed25519/ref10/ge_p1p1_to_p3.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p1p1_to_p3.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_p2.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_to_p2.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_sub.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_sub.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_sub.h
ed25519/ref10/ge_madd.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_madd.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_madd.h
ed25519/ref10/fe_frombytes.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_frombytes.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_frombytes.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/base.h
ed25519/ref10/fe_neg.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p2_dbl.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p2_dbl.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p2_dbl.o: ed25519/ref10/ge_p2_dbl.h
ed25519/ref10/fe_1.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/sign.o: ed25519/ref10/crypto_sign.h
ed25519/ref10/sign.o: ed25519/ref10/ed25519_ref10.h
ed25519/ref10/sign.o: ed25519/ref10/crypto_hash_sha512.h ed25519/ref10/ge.h
ed25519/ref10/sign.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/sign.o: ed25519/ref10/sc.h
ed25519/ref10/fe_cmov.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/open.o: ed25519/ref10/crypto_sign.h
ed25519/ref10/open.o: ed25519/ref10/ed25519_ref10.h
ed25519/ref10/open.o: ed25519/ref10/crypto_hash_sha512.h
ed25519/ref10/open.o: ed25519/ref10/crypto_verify_32.h ed25519/ref10/ge.h
ed25519/ref10/open.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/open.o: ed25519/ref10/sc.h
ed25519/ref10/ge_add.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_add.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_add.h
ed25519/ref10/fe_sub.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_pow22523.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_pow22523.o: ed25519/ref10/pow22523.h
ed25519/ref10/fe_sq2.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq2.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_mul.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_mul.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/ge_p2_0.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p2_0.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/keypair.o: ed25519/ref10/randombytes.h
ed25519/ref10/keypair.o: ed25519/ref10/crypto_sign.h
ed25519/ref10/keypair.o: ed25519/ref10/ed25519_ref10.h
ed25519/ref10/keypair.o: ed25519/ref10/crypto_hash_sha512.h
ed25519/ref10/keypair.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/keypair.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnonzero.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnonzero.o: ed25519/ref10/crypto_verify_32.h
ed25519/ref10/ge_p1p1_to_p2.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p1p1_to_p2.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_add.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_cached.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_to_cached.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_cached.o: ed25519/ref10/d2.h
ed25519/ref10/ge_tobytes.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_tobytes.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/sc_muladd.o: ed25519/ref10/sc.h ed25519/ref10/crypto_int64.h
ed25519/ref10/sc_muladd.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/sc_muladd.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/ge_p3_dbl.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_dbl.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_frombytes.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_frombytes.o: ed25519/ref10/crypto_int32.h ed25519/ref10/d.h
ed25519/ref10/ge_frombytes.o: ed25519/ref10/sqrtm1.h
ed25519/ref10/fe_invert.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_invert.o: ed25519/ref10/pow225521.h
ed25519/ref10/ge_precomp_0.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_precomp_0.o: ed25519/ref10/crypto_int32.h
keccak.o: types.h keccak.h
test_base16.o: types.h base16.h
base16_to.o: types.h base16.h
base16_from.o: types.h base16.h
test_base32.o: types.h base32.h
base32_to.o: types.h base32.h
base32_from.o: types.h base32.h
test_ed25519.o: types.h base16.h ed25519/ref10/ed25519_ref10.h
base16_to.o: types.h base16.h
base16_from.o: types.h base16.h
ed25519/ref10/sc_reduce.o: ed25519/ref10/sc.h ed25519/ref10/crypto_int64.h
ed25519/ref10/sc_reduce.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/sc_reduce.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/ge_msub.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_msub.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_msub.h
ed25519/ref10/fe_copy.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnegative.o: ed25519/ref10/fe.h
ed25519/ref10/fe_isnegative.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_tobytes.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_tobytes.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_tobytes.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_0.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_0.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_0.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_double_scalarmult.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_double_scalarmult.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_double_scalarmult.o: ed25519/ref10/base2.h
ed25519/ref10/ge_p1p1_to_p3.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p1p1_to_p3.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_p2.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_to_p2.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_sub.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_sub.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_sub.h
ed25519/ref10/ge_madd.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_madd.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_madd.h
ed25519/ref10/fe_frombytes.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_frombytes.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_frombytes.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/ge_scalarmult_base.o: ed25519/ref10/base.h
ed25519/ref10/fe_neg.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p2_dbl.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p2_dbl.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p2_dbl.o: ed25519/ref10/ge_p2_dbl.h
ed25519/ref10/fe_1.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/sign.o: ed25519/ref10/crypto_sign.h
ed25519/ref10/sign.o: ed25519/ref10/ed25519_ref10.h
ed25519/ref10/sign.o: ed25519/ref10/crypto_hash_sha512.h ed25519/ref10/ge.h
ed25519/ref10/sign.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/sign.o: ed25519/ref10/sc.h
ed25519/ref10/fe_cmov.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/open.o: ed25519/ref10/crypto_sign.h
ed25519/ref10/open.o: ed25519/ref10/ed25519_ref10.h
ed25519/ref10/open.o: ed25519/ref10/crypto_hash_sha512.h
ed25519/ref10/open.o: ed25519/ref10/crypto_verify_32.h ed25519/ref10/ge.h
ed25519/ref10/open.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/open.o: ed25519/ref10/sc.h
ed25519/ref10/ge_add.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_add.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_add.h
ed25519/ref10/fe_sub.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_pow22523.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_pow22523.o: ed25519/ref10/pow22523.h
ed25519/ref10/fe_sq2.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq2.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_mul.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_mul.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/ge_p2_0.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p2_0.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/keypair.o: ed25519/ref10/randombytes.h
ed25519/ref10/keypair.o: ed25519/ref10/crypto_sign.h
ed25519/ref10/keypair.o: ed25519/ref10/ed25519_ref10.h
ed25519/ref10/keypair.o: ed25519/ref10/crypto_hash_sha512.h
ed25519/ref10/keypair.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/keypair.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnonzero.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnonzero.o: ed25519/ref10/crypto_verify_32.h
ed25519/ref10/ge_p1p1_to_p2.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p1p1_to_p2.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_add.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_cached.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_to_cached.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_cached.o: ed25519/ref10/d2.h
ed25519/ref10/ge_tobytes.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_tobytes.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/sc_muladd.o: ed25519/ref10/sc.h ed25519/ref10/crypto_int64.h
ed25519/ref10/sc_muladd.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/sc_muladd.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/ge_p3_dbl.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_dbl.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_frombytes.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_frombytes.o: ed25519/ref10/crypto_int32.h ed25519/ref10/d.h
ed25519/ref10/ge_frombytes.o: ed25519/ref10/sqrtm1.h
ed25519/ref10/fe_invert.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_invert.o: ed25519/ref10/pow225521.h
ed25519/ref10/ge_precomp_0.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_precomp_0.o: ed25519/ref10/crypto_int32.h

359
Makefile.in Normal file
View file

@ -0,0 +1,359 @@
# Build template processed by ./configure (autoconf): @...@ tokens are
# substituted with configure results.
CC= @CC@
# Base language/feature flags.  -no-pie because the amd64-* assembly
# implementations are not position-independent code.
CSTD= -std=c99 -Wall -D_POSIX_C_SOURCE=200112L -no-pie
# -DED25519_@ED25519IMPL@ selects the implementation in the C sources.
CFLAGS= $(CSTD) @CFLAGS@ -DED25519_@ED25519IMPL@
ASFLAGS= -no-pie
MV= mv
# Object lists for each available ed25519 implementation; the active one is
# picked by configure through @ED25519IMPL@.
ED25519_ref10= $(patsubst %.c,%.c.o,$(wildcard ed25519/ref10/*.c))
ED25519_amd64_51_30k= \
$(patsubst %.c,%.c.o,$(wildcard ed25519/amd64-51-30k/*.c)) \
$(patsubst %.s,%.s.o,$(wildcard ed25519/amd64-51-30k/*.s))
ED25519_amd64_64_24k= \
$(patsubst %.c,%.c.o,$(wildcard ed25519/amd64-64-24k/*.c)) \
$(patsubst %.s,%.s.o,$(wildcard ed25519/amd64-64-24k/*.s))
ED25519OBJ= $(ED25519_@ED25519IMPL@)
# Objects for the main mkp224o binary.
MAINOBJ= \
main.c.o \
base32_to.c.o \
base32_from.c.o \
$(ED25519OBJ) \
keccak.c.o
# Objects for each standalone test program.
TEST_BASE32OBJ= \
test_base32.c.o \
base32_to.c.o \
base32_from.c.o
TEST_BASE16OBJ= \
test_base16.c.o \
base16_to.c.o \
base16_from.c.o
TEST_ED25519OBJ= \
test_ed25519.c.o \
base16_to.c.o \
base16_from.c.o \
$(ED25519OBJ)
# Every object of every configuration ($(sort ...) also deduplicates); used
# by `depend` and `clean` so even implementations that are not currently
# selected get dependency lines and get cleaned.
# BUGFIX: $(TEST_BASE16OBJ) was missing from this list, so test_base16.c.o
# was never removed by `clean` and test_base16.c received no generated
# header dependencies.
ALLO= $(sort $(MAINOBJ) $(TEST_BASE32OBJ) $(TEST_BASE16OBJ) $(TEST_ED25519OBJ) $(ED25519_ref10) $(ED25519_amd64_51_30k) $(ED25519_amd64_64_24k))
# All C sources corresponding to the objects above (assembly excluded).
ALLC= $(patsubst %.c.o,%.c,$(filter %.c.o %.c,$(ALLO)))
# Everything `clean` should delete (objects only; executables handled apart).
CLEANO= $(filter %.o,$(ALLO))
MAINLIB= -lsodium -lpthread
TEST_ED25519LIB= -lsodium
EXE= mkp224o test_base32 test_base16 test_ed25519
# None of these targets name real files; declare them phony so a stray file
# called e.g. "clean" cannot silently disable the target.
.PHONY: default all clean depend

# `default` builds only the main binary; `all` additionally builds the tests.
default: mkp224o

all: $(EXE)

# Link rules: link into $@.tmp and rename on success, so an interrupted or
# failed link never leaves a truncated-but-newer binary looking up to date.
mkp224o: $(MAINOBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ $(MAINLIB) && $(MV) $@.tmp $@

test_base32: $(TEST_BASE32OBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ && $(MV) $@.tmp $@

test_base16: $(TEST_BASE16OBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ && $(MV) $@.tmp $@

test_ed25519: $(TEST_ED25519OBJ)
	$(CC) $(CFLAGS) -o $@.tmp $^ $(TEST_ED25519LIB) && $(MV) $@.tmp $@

# Compile rules; same atomic temp-file trick as the link rules.
%.c.o: %.c
	$(CC) $(CFLAGS) -c -o $@.tmp $< && $(MV) $@.tmp $@

# Assembly objects (the amd64-* implementations).
%.s.o: %.s
	$(CC) $(ASFLAGS) -c -o $@.tmp $< && $(MV) $@.tmp $@

# $(CLEANO) already contains every object of every implementation
# (including $(MAINOBJ)), so one $(RM) line covers all objects.
clean:
	$(RM) $(CLEANO)
	$(RM) $(EXE)

# Regenerate the makedepend section below: -fMakefile.in edits this template
# (not the generated Makefile), -o.c.o makes emitted targets match our *.c.o
# object names, and all three -DED25519_* defines are given so each
# implementation's sources are scanned regardless of the configured one.
depend:
	makedepend -Y -fMakefile.in -o.c.o -- $(CSTD) -DED25519_ref10 -DED25519_amd64_51_30k -DED25519_amd64_64_24k -- $(ALLC)
# DO NOT DELETE THIS LINE
base16_from.c.o: types.h base16.h
base16_to.c.o: types.h base16.h
base32_from.c.o: types.h base32.h
base32_to.c.o: types.h base32.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/crypto_verify_32.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/randombytes.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/batch.c.o: ed25519/amd64-51-30k/hram.h
ed25519/amd64-51-30k/fe25519_add.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_getparity.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_invert.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_iseq.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_iszero.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_neg.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_pack.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_pow2523.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_setint.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_sub.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/fe25519_unpack.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_add.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_add.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_add.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_base.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_base.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_base.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_double.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_double.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_double.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/ge25519_base_slide_multiples.data
ed25519/amd64-51-30k/ge25519_isneutral.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_isneutral.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_isneutral.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/index_heap.h
ed25519/amd64-51-30k/ge25519_pack.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_pack.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_pack.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_scalarmult_base.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_scalarmult_base.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/ge25519_scalarmult_base.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_scalarmult_base.c.o: ed25519/amd64-51-30k/ge25519_base_niels_smalltables.data
ed25519/amd64-51-30k/ge25519_unpackneg.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/ge25519_unpackneg.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/ge25519_unpackneg.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/hram.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-51-30k/hram.c.o: ed25519/amd64-51-30k/hram.h
ed25519/amd64-51-30k/index_heap.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/index_heap.c.o: ed25519/amd64-51-30k/index_heap.h
ed25519/amd64-51-30k/keypair.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-51-30k/keypair.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-51-30k/keypair.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-51-30k/keypair.c.o: ed25519/amd64-51-30k/randombytes.h
ed25519/amd64-51-30k/keypair.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/keypair.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/keypair.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/open.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-51-30k/open.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-51-30k/open.c.o: ed25519/amd64-51-30k/crypto_verify_32.h
ed25519/amd64-51-30k/open.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-51-30k/open.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/open.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/open.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_from32bytes.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_from64bytes.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_from_shortsc.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_iszero.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_mul.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_mul_shortsc.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_slide.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_to32bytes.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sc25519_window4.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-51-30k/sign.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-51-30k/sign.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-51-30k/sign.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-51-30k/sign.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-51-30k/sign.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-51-30k/sign.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/crypto_verify_32.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/randombytes.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/batch.c.o: ed25519/amd64-51-30k/hram.h
ed25519/amd64-64-24k/fe25519_getparity.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_invert.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_iseq.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_iszero.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_neg.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_pack.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_pow2523.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_setint.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/fe25519_unpack.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_add.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_add.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_add.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_base.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_base.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_base.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_double.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_double.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_double.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_double_scalarmult.c.o: ed25519/amd64-51-30k/ge25519_base_slide_multiples.data
ed25519/amd64-64-24k/ge25519_isneutral.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_isneutral.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_isneutral.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_multi_scalarmult.c.o: ed25519/amd64-51-30k/index_heap.h
ed25519/amd64-64-24k/ge25519_pack.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_pack.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_pack.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_scalarmult_base.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_scalarmult_base.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/ge25519_scalarmult_base.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_scalarmult_base.c.o: ed25519/amd64-64-24k/ge25519_base_niels.data
ed25519/amd64-64-24k/ge25519_unpackneg.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/ge25519_unpackneg.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/ge25519_unpackneg.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/hram.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-64-24k/hram.c.o: ed25519/amd64-51-30k/hram.h
ed25519/amd64-64-24k/index_heap.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/index_heap.c.o: ed25519/amd64-51-30k/index_heap.h
ed25519/amd64-64-24k/keypair.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-64-24k/keypair.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-64-24k/keypair.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-64-24k/keypair.c.o: ed25519/amd64-51-30k/randombytes.h
ed25519/amd64-64-24k/keypair.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/keypair.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/keypair.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/open.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-64-24k/open.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-64-24k/open.c.o: ed25519/amd64-51-30k/crypto_verify_32.h
ed25519/amd64-64-24k/open.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-64-24k/open.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/open.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/open.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_from32bytes.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_from64bytes.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_from_shortsc.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_iszero.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_mul.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_mul_shortsc.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_slide.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_to32bytes.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sc25519_window4.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/amd64-64-24k/sign.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/amd64-64-24k/sign.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/amd64-64-24k/sign.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/amd64-64-24k/sign.c.o: ed25519/amd64-51-30k/ge25519.h
ed25519/amd64-64-24k/sign.c.o: ed25519/amd64-51-30k/fe25519.h
ed25519/amd64-64-24k/sign.c.o: ed25519/amd64-51-30k/sc25519.h
ed25519/ref10/fe_0.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_1.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_add.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_cmov.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_copy.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_frombytes.c.o: ed25519/ref10/fe.h
ed25519/ref10/fe_frombytes.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_frombytes.c.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_frombytes.c.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/fe_invert.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_invert.c.o: ed25519/ref10/pow225521.h
ed25519/ref10/fe_isnegative.c.o: ed25519/ref10/fe.h
ed25519/ref10/fe_isnegative.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnonzero.c.o: ed25519/ref10/fe.h
ed25519/ref10/fe_isnonzero.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_isnonzero.c.o: ed25519/amd64-51-30k/crypto_verify_32.h
ed25519/ref10/fe_mul.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_mul.c.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_neg.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_pow22523.c.o: ed25519/ref10/fe.h
ed25519/ref10/fe_pow22523.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_pow22523.c.o: ed25519/ref10/pow22523.h
ed25519/ref10/fe_sq.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq.c.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_sq2.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_sq2.c.o: ed25519/ref10/crypto_int64.h
ed25519/ref10/fe_sub.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/fe_tobytes.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_add.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_add.c.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_add.h
ed25519/ref10/ge_double_scalarmult.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_double_scalarmult.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_double_scalarmult.c.o: ed25519/ref10/base2.h
ed25519/ref10/ge_frombytes.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_frombytes.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_frombytes.c.o: ed25519/ref10/d.h ed25519/ref10/sqrtm1.h
ed25519/ref10/ge_madd.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_madd.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_madd.c.o: ed25519/ref10/ge_madd.h
ed25519/ref10/ge_msub.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_msub.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_msub.c.o: ed25519/ref10/ge_msub.h
ed25519/ref10/ge_p1p1_to_p2.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p1p1_to_p2.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p1p1_to_p3.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p1p1_to_p3.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p2_0.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p2_0.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p2_dbl.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p2_dbl.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p2_dbl.c.o: ed25519/ref10/ge_p2_dbl.h
ed25519/ref10/ge_p3_0.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_0.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_dbl.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_dbl.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_cached.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_to_cached.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_to_cached.c.o: ed25519/ref10/d2.h
ed25519/ref10/ge_p3_to_p2.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_to_p2.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_p3_tobytes.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_p3_tobytes.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_precomp_0.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_precomp_0.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_scalarmult_base.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_scalarmult_base.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/ge_scalarmult_base.c.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/ge_scalarmult_base.c.o: ed25519/ref10/base.h
ed25519/ref10/ge_sub.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_sub.c.o: ed25519/ref10/crypto_int32.h ed25519/ref10/ge_sub.h
ed25519/ref10/ge_tobytes.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/ge_tobytes.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/keypair.c.o: ed25519/amd64-51-30k/randombytes.h
ed25519/ref10/keypair.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/ref10/keypair.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/ref10/keypair.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/ref10/keypair.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/keypair.c.o: ed25519/ref10/crypto_int32.h
ed25519/ref10/open.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/ref10/open.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/ref10/open.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/ref10/open.c.o: ed25519/amd64-51-30k/crypto_verify_32.h
ed25519/ref10/open.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/open.c.o: ed25519/ref10/crypto_int32.h ed25519/ref10/sc.h
ed25519/ref10/sc_muladd.c.o: ed25519/ref10/sc.h ed25519/ref10/crypto_int64.h
ed25519/ref10/sc_muladd.c.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/sc_muladd.c.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/sc_reduce.c.o: ed25519/ref10/sc.h ed25519/ref10/crypto_int64.h
ed25519/ref10/sc_reduce.c.o: ed25519/ref10/crypto_uint32.h
ed25519/ref10/sc_reduce.c.o: ed25519/ref10/crypto_uint64.h
ed25519/ref10/sign.c.o: ed25519/amd64-51-30k/crypto_sign.h
ed25519/ref10/sign.c.o: ed25519/amd64-51-30k/ed25519.h
ed25519/ref10/sign.c.o: ed25519/amd64-51-30k/crypto_hash_sha512.h
ed25519/ref10/sign.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
ed25519/ref10/sign.c.o: ed25519/ref10/crypto_int32.h ed25519/ref10/sc.h
keccak.c.o: types.h keccak.h
main.c.o: ed25519/ed25519.h ed25519/ref10/ed25519.h ed25519/ref10/ge.h
main.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
main.c.o: ed25519/amd64-51-30k/ed25519.h ed25519/amd64-51-30k/ge25519.h
main.c.o: ed25519/amd64-51-30k/fe25519.h ed25519/amd64-51-30k/sc25519.h
main.c.o: ed25519/amd64-64-24k/ed25519.h ed25519/amd64-64-24k/ge25519.h
main.c.o: types.h vec.h base32.h keccak.h
test_base32.c.o: types.h base32.h
test_ed25519.c.o: types.h base16.h ed25519/ed25519.h ed25519/ref10/ed25519.h
test_ed25519.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
test_ed25519.c.o: ed25519/ref10/crypto_int32.h ed25519/amd64-51-30k/ed25519.h
test_ed25519.c.o: ed25519/amd64-51-30k/ge25519.h
test_ed25519.c.o: ed25519/amd64-51-30k/fe25519.h
test_ed25519.c.o: ed25519/amd64-51-30k/sc25519.h
test_ed25519.c.o: ed25519/amd64-64-24k/ed25519.h
test_ed25519.c.o: ed25519/amd64-64-24k/ge25519.h

2
autogen.sh Executable file
View file

@ -0,0 +1,2 @@
#!/bin/sh
# Regenerate the ./configure script from configure.ac.
# -f forces regeneration even if an up-to-date ./configure already exists.
exec autoconf -f

8
configure.ac Normal file
View file

@ -0,0 +1,8 @@
AC_INIT(mkp224o)
# safety check
AC_CONFIG_SRCDIR([main.c])
# C compiler
# Default optimization flags, applied only when the user did not set CFLAGS
# (the ':' no-op with ${var="..."} assigns only if CFLAGS is unset).
# NOTE(review): -march=native is fine for local builds but non-portable for
# distributed binaries.
: ${CFLAGS="-O3 -march=native"}
AC_PROG_CC
# Ed25519 backend substituted into the generated Makefile; ref10 is the
# portable C implementation (amd64-51-30k / amd64-64-24k also exist in-tree).
AC_SUBST(ED25519IMPL,[ref10])
AC_OUTPUT(Makefile)

View file

@ -0,0 +1,4 @@
/* SUPERCOP-style crypto_sign size parameters for this ed25519 backend. */
#define CRYPTO_SECRETKEYBYTES 64   /* secret key length in bytes */
#define CRYPTO_PUBLICKEYBYTES 32   /* public key length in bytes */
#define CRYPTO_BYTES 64            /* signature length in bytes */
#define CRYPTO_DETERMINISTIC 1     /* SUPERCOP flag: signing is deterministic */

View file

@ -0,0 +1 @@
amd64

View file

@ -0,0 +1,94 @@
#include "crypto_sign.h"
#include "crypto_verify_32.h"
#include "crypto_hash_sha512.h"
#include "randombytes.h"
#include "ge25519.h"
#include "hram.h"

/* Maximum number of signatures verified per multi-scalar-multiplication pass. */
#define MAXBATCH 64

/*
 * Batch ed25519 signature verification (SUPERCOP batch interface).
 *
 * For each i: m[i]/mlen[i] receive the recovered message, sm[i]/smlen[i] is
 * the signed message (64-byte signature followed by the message), pk[i] the
 * public key; num is the number of entries.  Returns 0 iff every signature
 * verified; mlen[i] is set to (unsigned)-1 for entries whose message was not
 * recovered.
 *
 * Batches of up to MAXBATCH signatures are checked with a single randomized
 * multi-scalar multiplication; if the combined check fails (or an input is
 * unusable) the code falls back to verifying that batch one-by-one to find
 * the offending signatures.
 */
int crypto_sign_open_batch(
    unsigned char* const m[],unsigned long long mlen[],
    unsigned char* const sm[],const unsigned long long smlen[],
    unsigned char* const pk[],
    unsigned long long num
    )
{
  int ret = 0;
  unsigned long long i, j;
  shortsc25519 r[MAXBATCH];        /* random per-signature weights */
  sc25519 scalars[2*MAXBATCH+1];
  ge25519 points[2*MAXBATCH+1];
  unsigned char hram[crypto_hash_sha512_BYTES];
  unsigned long long batchsize;

  /* Mark every message length as "not recovered" up front. */
  for (i = 0;i < num;++i) mlen[i] = -1;

  /* Batched path is only used for groups of at least 3 signatures. */
  while (num >= 3) {
    batchsize = num;
    if (batchsize > MAXBATCH) batchsize = MAXBATCH;

    /* Each signed message must at least contain the 64-byte signature. */
    for (i = 0;i < batchsize;++i)
      if (smlen[i] < 64) goto fallback;

    /* Random weights keep independent bad signatures from cancelling out
       in the summed verification equation. */
    randombytes((unsigned char*)r,sizeof(shortsc25519) * batchsize);

    /* Computing scalars[0] = ((r1s1 + r2s2 + ...)) */
    for(i=0;i<batchsize;i++)
    {
      sc25519_from32bytes(&scalars[i], sm[i]+32);   /* S half of signature */
      sc25519_mul_shortsc(&scalars[i], &scalars[i], &r[i]);
    }
    for(i=1;i<batchsize;i++)
      sc25519_add(&scalars[0], &scalars[0], &scalars[i]);

    /* Computing scalars[1] ... scalars[batchsize] as r[i]*H(R[i],A[i],m[i]) */
    for(i=0;i<batchsize;i++)
    {
      get_hram(hram, sm[i], pk[i], m[i], smlen[i]);
      sc25519_from64bytes(&scalars[i+1],hram);
      sc25519_mul_shortsc(&scalars[i+1],&scalars[i+1],&r[i]);
    }

    /* Setting scalars[batchsize+1] ... scalars[2*batchsize] to r[i] */
    for(i=0;i<batchsize;i++)
      sc25519_from_shortsc(&scalars[batchsize+i+1],&r[i]);

    /* Computing points: base point, then the (negated) public keys and the
       (negated) R components taken from the signature prefixes. */
    points[0] = ge25519_base;
    for(i=0;i<batchsize;i++)
      if (ge25519_unpackneg_vartime(&points[i+1], pk[i])) goto fallback;
    for(i=0;i<batchsize;i++)
      if (ge25519_unpackneg_vartime(&points[batchsize+i+1], sm[i])) goto fallback;

    /* The whole batch is valid iff the weighted sum is the neutral element. */
    ge25519_multi_scalarmult_vartime(points, points, scalars, 2*batchsize+1);
    if (ge25519_isneutral_vartime(points)) {
      /* Batch verified: copy each message out from behind its signature. */
      for(i=0;i<batchsize;i++)
      {
        for(j=0;j<smlen[i]-64;j++)
          m[i][j] = sm[i][j + 64];
        mlen[i] = smlen[i]-64;
      }
    } else {
      fallback:
      /* Combined check failed or input unusable: verify this batch entry by
         entry so only the genuinely bad signatures are reported. */
      for (i = 0;i < batchsize;++i)
        ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]);
    }

    /* Advance all parallel arrays to the next batch. */
    m += batchsize;
    mlen += batchsize;
    sm += batchsize;
    smlen += batchsize;
    pk += batchsize;
    num -= batchsize;
  }

  /* Fewer than 3 signatures left: plain one-by-one verification. */
  for (i = 0;i < num;++i)
    ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]);

  return ret;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,47 @@
# Constant pool shared by the amd64-51-30k field/scalar arithmetic routines.
# Field elements use radix-2^51: five 64-bit limbs of (nominally) 51 bits.
#   REDMASK51   = 2^51 - 1 (limb mask used during carry propagation)
#   121666_213  = 121666 * 2^13
#   2P0/2P1234  = limbs of 2*p, 4P0/4P1234 = limbs of 4*p, p = 2^255 - 19
#                 (presumably added before subtractions to keep limbs
#                 non-negative -- standard for this representation)
#   MU0..MU4    = Barrett reduction constant for the scalar field
#   ORDER0..3   = ed25519 group order, little-endian 64-bit words
#   EC2D0..4    = curve constant (2*d per the symbol name), radix-2^51 limbs
#   __38        = 2*19, used when folding the top limb back into the bottom
.data
.globl crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
.globl crypto_sign_ed25519_amd64_51_30k_batch_121666_213
.globl crypto_sign_ed25519_amd64_51_30k_batch_2P0
.globl crypto_sign_ed25519_amd64_51_30k_batch_2P1234
.globl crypto_sign_ed25519_amd64_51_30k_batch_4P0
.globl crypto_sign_ed25519_amd64_51_30k_batch_4P1234
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU0
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU1
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU2
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU3
.globl crypto_sign_ed25519_amd64_51_30k_batch_MU4
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER1
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER2
.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER3
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D0
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D1
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D2
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D3
.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D4
.globl crypto_sign_ed25519_amd64_51_30k_batch__38
.p2align 4
crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51: .quad 0x0007FFFFFFFFFFFF
crypto_sign_ed25519_amd64_51_30k_batch_121666_213: .quad 996687872
crypto_sign_ed25519_amd64_51_30k_batch_2P0: .quad 0xFFFFFFFFFFFDA
crypto_sign_ed25519_amd64_51_30k_batch_2P1234: .quad 0xFFFFFFFFFFFFE
crypto_sign_ed25519_amd64_51_30k_batch_4P0: .quad 0x1FFFFFFFFFFFB4
crypto_sign_ed25519_amd64_51_30k_batch_4P1234: .quad 0x1FFFFFFFFFFFFC
crypto_sign_ed25519_amd64_51_30k_batch_MU0: .quad 0xED9CE5A30A2C131B
crypto_sign_ed25519_amd64_51_30k_batch_MU1: .quad 0x2106215D086329A7
crypto_sign_ed25519_amd64_51_30k_batch_MU2: .quad 0xFFFFFFFFFFFFFFEB
crypto_sign_ed25519_amd64_51_30k_batch_MU3: .quad 0xFFFFFFFFFFFFFFFF
crypto_sign_ed25519_amd64_51_30k_batch_MU4: .quad 0x000000000000000F
crypto_sign_ed25519_amd64_51_30k_batch_ORDER0: .quad 0x5812631A5CF5D3ED
crypto_sign_ed25519_amd64_51_30k_batch_ORDER1: .quad 0x14DEF9DEA2F79CD6
crypto_sign_ed25519_amd64_51_30k_batch_ORDER2: .quad 0x0000000000000000
crypto_sign_ed25519_amd64_51_30k_batch_ORDER3: .quad 0x1000000000000000
crypto_sign_ed25519_amd64_51_30k_batch_EC2D0: .quad 1859910466990425
crypto_sign_ed25519_amd64_51_30k_batch_EC2D1: .quad 932731440258426
crypto_sign_ed25519_amd64_51_30k_batch_EC2D2: .quad 1072319116312658
crypto_sign_ed25519_amd64_51_30k_batch_EC2D3: .quad 1815898335770999
crypto_sign_ed25519_amd64_51_30k_batch_EC2D4: .quad 633789495995903
crypto_sign_ed25519_amd64_51_30k_batch__38: .quad 38

View file

@ -0,0 +1 @@
#include <sodium/crypto_hash_sha512.h>

View file

@ -0,0 +1,2 @@
/* SUPERCOP compatibility shim: map crypto_int32 onto the C99 fixed-width type. */
#include <stdint.h>
#define crypto_int32 int32_t

View file

@ -0,0 +1,2 @@
/* SUPERCOP compatibility shim: map crypto_int64 onto the C99 fixed-width type. */
#include <stdint.h>
#define crypto_int64 int64_t

View file

@ -0,0 +1,8 @@
/*
 * Map the generic SUPERCOP crypto_sign_* names onto the amd64-51-30k
 * implementation's exported symbols (prototypes in ed25519.h).
 *
 * Fix: crypto_sign_seckey previously expanded to ed25519_ramd64_51_30k_seckey
 * ("ramd64" typo) -- a symbol that is declared nowhere; any use would fail at
 * link time.  ed25519.h declares ed25519_amd64_51_30k_seckey.
 */
#define crypto_sign ed25519_amd64_51_30k_sign
#define crypto_sign_keypair ed25519_amd64_51_30k_keygen
#define crypto_sign_seckey ed25519_amd64_51_30k_seckey
#define crypto_sign_seckey_expand ed25519_amd64_51_30k_seckey_expand
#define crypto_sign_pubkey ed25519_amd64_51_30k_pubkey
#define crypto_sign_open ed25519_amd64_51_30k_open
#include "ed25519.h"

View file

@ -0,0 +1,2 @@
/* SUPERCOP compatibility shim: map crypto_uint32 onto the C99 fixed-width type. */
#include <stdint.h>
#define crypto_uint32 uint32_t

View file

@ -0,0 +1,2 @@
/* SUPERCOP compatibility shim: map crypto_uint64 onto the C99 fixed-width type. */
#include <stdint.h>
#define crypto_uint64 uint64_t

View file

@ -0,0 +1,4 @@
#include <sodium/utils.h>

/*
 * Constant-time 32-byte comparison with SUPERCOP semantics: returns 0 when
 * the buffers are equal and nonzero when they differ (callers test `!= 0`
 * for a mismatch, e.g. signature checks in open.c).
 *
 * sodium_memcmp() already returns 0 on equal / -1 on difference, so it must
 * be passed through unnegated.  The previous definition,
 * `(!sodium_memcmp(...))`, returned 1 on equal and 0 on mismatch -- the
 * inverse of the crypto_verify_32 contract -- which would make every
 * verification that relies on it report the opposite result.
 */
#define crypto_verify_32(a,b) \
	(sodium_memcmp((a), (b), 32))

View file

@ -0,0 +1,4 @@
/*
 * Key-handling entry points of the amd64-51-30k ed25519 backend; these are
 * the targets of the crypto_sign_* aliases in crypto_sign.h.
 *   seckey        -- generate a fresh random secret key into sk
 *   seckey_expand -- derive the secret key sk from the given seed
 *   pubkey        -- compute public key pk from secret key sk
 *   keygen        -- generate a (pk, sk) pair
 * NOTE(review): buffer sizes are not visible here; presumably
 * CRYPTO_SECRETKEYBYTES/CRYPTO_PUBLICKEYBYTES from the api header -- confirm.
 */
int ed25519_amd64_51_30k_seckey(unsigned char *sk);
int ed25519_amd64_51_30k_seckey_expand(unsigned char *sk,const unsigned char *seed);
int ed25519_amd64_51_30k_pubkey(unsigned char *pk,const unsigned char *sk);
int ed25519_amd64_51_30k_keygen(unsigned char *pk,unsigned char *sk);

View file

@ -0,0 +1,65 @@
#ifndef FE25519_H
#define FE25519_H

/*
 * Field elements of GF(2^255 - 19) in radix-2^51 representation: five
 * unsigned 64-bit limbs v[0..4] holding (nominally) 51 bits each.
 * The short fe25519_* names below are aliased onto the long batch-namespaced
 * symbols implemented by the accompanying .c/.s files so several backends
 * can coexist in one binary.
 */
#define fe25519 crypto_sign_ed25519_amd64_51_30k_batch_fe25519
#define fe25519_freeze crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
#define fe25519_unpack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_unpack
#define fe25519_pack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pack
#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iszero_vartime
#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iseq_vartime
#define fe25519_cmov crypto_sign_ed25519_amd64_51_30k_batch_fe25519_cmov
#define fe25519_setint crypto_sign_ed25519_amd64_51_30k_batch_fe25519_setint
#define fe25519_neg crypto_sign_ed25519_amd64_51_30k_batch_fe25519_neg
#define fe25519_getparity crypto_sign_ed25519_amd64_51_30k_batch_fe25519_getparity
#define fe25519_add crypto_sign_ed25519_amd64_51_30k_batch_fe25519_add
#define fe25519_sub crypto_sign_ed25519_amd64_51_30k_batch_fe25519_sub
#define fe25519_mul crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
#define fe25519_mul121666 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul121666
#define fe25519_square crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
#define fe25519_nsquare crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
#define fe25519_invert crypto_sign_ed25519_amd64_51_30k_batch_fe25519_invert
#define fe25519_pow2523 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pow2523

typedef struct
{
  unsigned long long v[5];
}
fe25519;

/* Reduce r in place to its canonical representative mod 2^255-19
   (iseq/iszero below rely on freeze producing a unique form). */
void fe25519_freeze(fe25519 *r);
void fe25519_unpack(fe25519 *r, const unsigned char x[32]);
void fe25519_pack(unsigned char r[32], const fe25519 *x);
/* Conditional move: r = x when b is nonzero, unchanged when b is 0. */
void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b);
/* NOTE(review): fe25519_cswap is declared here but has no namespacing
   #define above; confirm an implementation of the bare symbol exists
   before this is used, or add the alias. */
void fe25519_cswap(fe25519 *r, fe25519 *x, unsigned char b);
void fe25519_setint(fe25519 *r, unsigned int v);
void fe25519_neg(fe25519 *r, const fe25519 *x);
unsigned char fe25519_getparity(const fe25519 *x);
int fe25519_iszero_vartime(const fe25519 *x);
int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y);
void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_mul121666(fe25519 *r, const fe25519 *x);
void fe25519_square(fe25519 *r, const fe25519 *x);
/* Square r in place n times. */
void fe25519_nsquare(fe25519 *r, unsigned long long n);
void fe25519_invert(fe25519 *r, const fe25519 *x);
/* r = x^((p-5)/8) = x^(2^252 - 3), used for square-root extraction. */
void fe25519_pow2523(fe25519 *r, const fe25519 *x);
#endif

View file

@ -0,0 +1,10 @@
#include "fe25519.h"

/* r = x + y, limb by limb.  No carry propagation or modular reduction is
   performed here -- the 64-bit limbs have headroom above the nominal
   51 bits, so callers reduce later (e.g. via fe25519_freeze). */
void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y)
{
  int i;

  for (i = 0; i < 5; i++)
    r->v[i] = x->v[i] + y->v[i];
}

View file

@ -0,0 +1,434 @@
# fe25519_freeze(rp): fully reduce the radix-2^51 field element at rp in
# place to its canonical representative mod p = 2^255 - 19.
#
# qhasm-generated code; the "# qhasm:" / "# asm" comments below are the
# original generator annotations mapping qhasm variables to registers.
# Structure: save callee-saved registers, run three carry-propagation
# passes (the `loop = 3` counter) folding top-limb overflow back via *19,
# then conditionally subtract p using cmov-built masks (two51minus1 =
# 2^51-1, two51minus19 = 2^51-19) so no branch depends on the value.
# qhasm: int64 rp
# qhasm: input rp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 t
# qhasm: int64 loop
# qhasm: int64 two51minus1
# qhasm: int64 two51minus19
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: r0 = *(uint64 *) (rp + 0)
# asm 1: movq 0(<rp=int64#1),>r0=int64#2
# asm 2: movq 0(<rp=%rdi),>r0=%rsi
movq 0(%rdi),%rsi
# qhasm: r1 = *(uint64 *) (rp + 8)
# asm 1: movq 8(<rp=int64#1),>r1=int64#3
# asm 2: movq 8(<rp=%rdi),>r1=%rdx
movq 8(%rdi),%rdx
# qhasm: r2 = *(uint64 *) (rp + 16)
# asm 1: movq 16(<rp=int64#1),>r2=int64#4
# asm 2: movq 16(<rp=%rdi),>r2=%rcx
movq 16(%rdi),%rcx
# qhasm: r3 = *(uint64 *) (rp + 24)
# asm 1: movq 24(<rp=int64#1),>r3=int64#5
# asm 2: movq 24(<rp=%rdi),>r3=%r8
movq 24(%rdi),%r8
# qhasm: r4 = *(uint64 *) (rp + 32)
# asm 1: movq 32(<rp=int64#1),>r4=int64#6
# asm 2: movq 32(<rp=%rdi),>r4=%r9
movq 32(%rdi),%r9
# qhasm: two51minus1 = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=int64#7
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=%rax
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rax
# qhasm: two51minus19 = two51minus1
# asm 1: mov <two51minus1=int64#7,>two51minus19=int64#8
# asm 2: mov <two51minus1=%rax,>two51minus19=%r10
mov %rax,%r10
# qhasm: two51minus19 -= 18
# asm 1: sub $18,<two51minus19=int64#8
# asm 2: sub $18,<two51minus19=%r10
sub $18,%r10
# qhasm: loop = 3
# asm 1: mov $3,>loop=int64#9
# asm 2: mov $3,>loop=%r11
mov $3,%r11
# qhasm: reduceloop:
._reduceloop:
# qhasm: t = r0
# asm 1: mov <r0=int64#2,>t=int64#10
# asm 2: mov <r0=%rsi,>t=%r12
mov %rsi,%r12
# qhasm: (uint64) t >>= 51
# asm 1: shr $51,<t=int64#10
# asm 2: shr $51,<t=%r12
shr $51,%r12
# qhasm: r0 &= two51minus1
# asm 1: and <two51minus1=int64#7,<r0=int64#2
# asm 2: and <two51minus1=%rax,<r0=%rsi
and %rax,%rsi
# qhasm: r1 += t
# asm 1: add <t=int64#10,<r1=int64#3
# asm 2: add <t=%r12,<r1=%rdx
add %r12,%rdx
# qhasm: t = r1
# asm 1: mov <r1=int64#3,>t=int64#10
# asm 2: mov <r1=%rdx,>t=%r12
mov %rdx,%r12
# qhasm: (uint64) t >>= 51
# asm 1: shr $51,<t=int64#10
# asm 2: shr $51,<t=%r12
shr $51,%r12
# qhasm: r1 &= two51minus1
# asm 1: and <two51minus1=int64#7,<r1=int64#3
# asm 2: and <two51minus1=%rax,<r1=%rdx
and %rax,%rdx
# qhasm: r2 += t
# asm 1: add <t=int64#10,<r2=int64#4
# asm 2: add <t=%r12,<r2=%rcx
add %r12,%rcx
# qhasm: t = r2
# asm 1: mov <r2=int64#4,>t=int64#10
# asm 2: mov <r2=%rcx,>t=%r12
mov %rcx,%r12
# qhasm: (uint64) t >>= 51
# asm 1: shr $51,<t=int64#10
# asm 2: shr $51,<t=%r12
shr $51,%r12
# qhasm: r2 &= two51minus1
# asm 1: and <two51minus1=int64#7,<r2=int64#4
# asm 2: and <two51minus1=%rax,<r2=%rcx
and %rax,%rcx
# qhasm: r3 += t
# asm 1: add <t=int64#10,<r3=int64#5
# asm 2: add <t=%r12,<r3=%r8
add %r12,%r8
# qhasm: t = r3
# asm 1: mov <r3=int64#5,>t=int64#10
# asm 2: mov <r3=%r8,>t=%r12
mov %r8,%r12
# qhasm: (uint64) t >>= 51
# asm 1: shr $51,<t=int64#10
# asm 2: shr $51,<t=%r12
shr $51,%r12
# qhasm: r3 &= two51minus1
# asm 1: and <two51minus1=int64#7,<r3=int64#5
# asm 2: and <two51minus1=%rax,<r3=%r8
and %rax,%r8
# qhasm: r4 += t
# asm 1: add <t=int64#10,<r4=int64#6
# asm 2: add <t=%r12,<r4=%r9
add %r12,%r9
# qhasm: t = r4
# asm 1: mov <r4=int64#6,>t=int64#10
# asm 2: mov <r4=%r9,>t=%r12
mov %r9,%r12
# qhasm: (uint64) t >>= 51
# asm 1: shr $51,<t=int64#10
# asm 2: shr $51,<t=%r12
shr $51,%r12
# qhasm: r4 &= two51minus1
# asm 1: and <two51minus1=int64#7,<r4=int64#6
# asm 2: and <two51minus1=%rax,<r4=%r9
and %rax,%r9
# qhasm: t *= 19
# asm 1: imulq $19,<t=int64#10,>t=int64#10
# asm 2: imulq $19,<t=%r12,>t=%r12
imulq $19,%r12,%r12
# qhasm: r0 += t
# asm 1: add <t=int64#10,<r0=int64#2
# asm 2: add <t=%r12,<r0=%rsi
add %r12,%rsi
# qhasm: unsigned>? loop -= 1
# asm 1: sub $1,<loop=int64#9
# asm 2: sub $1,<loop=%r11
sub $1,%r11
# comment:fp stack unchanged by jump
# qhasm: goto reduceloop if unsigned>
ja ._reduceloop
# qhasm: t = 1
# asm 1: mov $1,>t=int64#10
# asm 2: mov $1,>t=%r12
mov $1,%r12
# qhasm: signed<? r0 - two51minus19
# asm 1: cmp <two51minus19=int64#8,<r0=int64#2
# asm 2: cmp <two51minus19=%r10,<r0=%rsi
cmp %r10,%rsi
# qhasm: t = loop if signed<
# asm 1: cmovl <loop=int64#9,<t=int64#10
# asm 2: cmovl <loop=%r11,<t=%r12
cmovl %r11,%r12
# qhasm: =? r1 - two51minus1
# asm 1: cmp <two51minus1=int64#7,<r1=int64#3
# asm 2: cmp <two51minus1=%rax,<r1=%rdx
cmp %rax,%rdx
# qhasm: t = loop if !=
# asm 1: cmovne <loop=int64#9,<t=int64#10
# asm 2: cmovne <loop=%r11,<t=%r12
cmovne %r11,%r12
# qhasm: =? r2 - two51minus1
# asm 1: cmp <two51minus1=int64#7,<r2=int64#4
# asm 2: cmp <two51minus1=%rax,<r2=%rcx
cmp %rax,%rcx
# qhasm: t = loop if !=
# asm 1: cmovne <loop=int64#9,<t=int64#10
# asm 2: cmovne <loop=%r11,<t=%r12
cmovne %r11,%r12
# qhasm: =? r3 - two51minus1
# asm 1: cmp <two51minus1=int64#7,<r3=int64#5
# asm 2: cmp <two51minus1=%rax,<r3=%r8
cmp %rax,%r8
# qhasm: t = loop if !=
# asm 1: cmovne <loop=int64#9,<t=int64#10
# asm 2: cmovne <loop=%r11,<t=%r12
cmovne %r11,%r12
# qhasm: =? r4 - two51minus1
# asm 1: cmp <two51minus1=int64#7,<r4=int64#6
# asm 2: cmp <two51minus1=%rax,<r4=%r9
cmp %rax,%r9
# qhasm: t = loop if !=
# asm 1: cmovne <loop=int64#9,<t=int64#10
# asm 2: cmovne <loop=%r11,<t=%r12
cmovne %r11,%r12
# qhasm: t = -t
# asm 1: neg <t=int64#10
# asm 2: neg <t=%r12
neg %r12
# qhasm: two51minus1 &= t
# asm 1: and <t=int64#10,<two51minus1=int64#7
# asm 2: and <t=%r12,<two51minus1=%rax
and %r12,%rax
# qhasm: two51minus19 &= t
# asm 1: and <t=int64#10,<two51minus19=int64#8
# asm 2: and <t=%r12,<two51minus19=%r10
and %r12,%r10
# qhasm: r0 -= two51minus19
# asm 1: sub <two51minus19=int64#8,<r0=int64#2
# asm 2: sub <two51minus19=%r10,<r0=%rsi
sub %r10,%rsi
# qhasm: r1 -= two51minus1
# asm 1: sub <two51minus1=int64#7,<r1=int64#3
# asm 2: sub <two51minus1=%rax,<r1=%rdx
sub %rax,%rdx
# qhasm: r2 -= two51minus1
# asm 1: sub <two51minus1=int64#7,<r2=int64#4
# asm 2: sub <two51minus1=%rax,<r2=%rcx
sub %rax,%rcx
# qhasm: r3 -= two51minus1
# asm 1: sub <two51minus1=int64#7,<r3=int64#5
# asm 2: sub <two51minus1=%rax,<r3=%r8
sub %rax,%r8
# qhasm: r4 -= two51minus1
# asm 1: sub <two51minus1=int64#7,<r4=int64#6
# asm 2: sub <two51minus1=%rax,<r4=%r9
sub %rax,%r9
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#2,0(<rp=int64#1)
# asm 2: movq <r0=%rsi,0(<rp=%rdi)
movq %rsi,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#3,8(<rp=int64#1)
# asm 2: movq <r1=%rdx,8(<rp=%rdi)
movq %rdx,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#4,16(<rp=int64#1)
# asm 2: movq <r2=%rcx,16(<rp=%rdi)
movq %rcx,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#5,24(<rp=int64#1)
# asm 2: movq <r3=%r8,24(<rp=%rdi)
movq %r8,24(%rdi)
# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#6,32(<rp=int64#1)
# asm 2: movq <r4=%r9,32(<rp=%rdi)
movq %r9,32(%rdi)
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,8 @@
#include "fe25519.h"

/* Return the parity (low bit) of the canonical form of x. */
unsigned char fe25519_getparity(const fe25519 *x)
{
  fe25519 canon;

  canon = *x;
  fe25519_freeze(&canon);   /* canonicalize before inspecting bit 0 */
  return (unsigned char)(canon.v[0] & 1);
}

View file

@ -0,0 +1,59 @@
#include "fe25519.h"
void fe25519_invert(fe25519 *r, const fe25519 *x)
{
fe25519 z2;
fe25519 z9;
fe25519 z11;
fe25519 z2_5_0;
fe25519 z2_10_0;
fe25519 z2_20_0;
fe25519 z2_50_0;
fe25519 z2_100_0;
fe25519 t;
/* 2 */ fe25519_square(&z2,x);
/* 4 */ fe25519_square(&t,&z2);
/* 8 */ fe25519_square(&t,&t);
/* 9 */ fe25519_mul(&z9,&t,x);
/* 11 */ fe25519_mul(&z11,&z9,&z2);
/* 22 */ fe25519_square(&t,&z11);
/* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9);
/* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0);
/* 2^10 - 2^5 */ fe25519_nsquare(&t,4);
/* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0);
/* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0);
/* 2^20 - 2^10 */ fe25519_nsquare(&t,9);
/* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0);
/* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0);
/* 2^40 - 2^20 */ fe25519_nsquare(&t,19);
/* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0);
/* 2^41 - 2^1 */ fe25519_square(&t,&t);
/* 2^50 - 2^10 */ fe25519_nsquare(&t,9);
/* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0);
/* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0);
/* 2^100 - 2^50 */ fe25519_nsquare(&t,49);
/* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0);
/* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0);
/* 2^200 - 2^100 */ fe25519_nsquare(&t,99);
/* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0);
/* 2^201 - 2^1 */ fe25519_square(&t,&t);
/* 2^250 - 2^50 */ fe25519_nsquare(&t,49);
/* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0);
/* 2^251 - 2^1 */ fe25519_square(&t,&t);
/* 2^252 - 2^2 */ fe25519_square(&t,&t);
/* 2^253 - 2^3 */ fe25519_square(&t,&t);
/* 2^254 - 2^4 */ fe25519_square(&t,&t);
/* 2^255 - 2^5 */ fe25519_square(&t,&t);
/* 2^255 - 21 */ fe25519_mul(r,&t,&z11);
}

View file

@ -0,0 +1,15 @@
#include "fe25519.h"

/* Equality test via canonical forms: freeze copies of both operands, then
   compare limb by limb.  Variable-time: exits at the first differing limb,
   so it must not be used on secret data. */
int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y)
{
  fe25519 a = *x;
  fe25519 b = *y;
  int i;

  fe25519_freeze(&a);
  fe25519_freeze(&b);
  for (i = 0; i < 5; i++)
    if (a.v[i] != b.v[i])
      return 0;
  return 1;
}

View file

@ -0,0 +1,13 @@
#include "fe25519.h"

/* Zero test: freeze a copy to canonical form, then check every limb.
   Variable-time: returns at the first nonzero limb, so it must not be
   used on secret data. */
int fe25519_iszero_vartime(const fe25519 *x)
{
  fe25519 canon = *x;
  int i;

  fe25519_freeze(&canon);
  for (i = 0; i < 5; i++)
    if (canon.v[i] != 0)
      return 0;
  return 1;
}

View file

@ -0,0 +1,946 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 c4
# qhasm: int64 c5
# qhasm: int64 c6
# qhasm: int64 c7
# qhasm: caller c1
# qhasm: caller c2
# qhasm: caller c3
# qhasm: caller c4
# qhasm: caller c5
# qhasm: caller c6
# qhasm: caller c7
# qhasm: stack64 c1_stack
# qhasm: stack64 c2_stack
# qhasm: stack64 c3_stack
# qhasm: stack64 c4_stack
# qhasm: stack64 c5_stack
# qhasm: stack64 c6_stack
# qhasm: stack64 c7_stack
# qhasm: stack64 x119_stack
# qhasm: stack64 x219_stack
# qhasm: stack64 x319_stack
# qhasm: stack64 x419_stack
# qhasm: stack64 rp_stack
# qhasm: int64 mulr01
# qhasm: int64 mulr11
# qhasm: int64 mulr21
# qhasm: int64 mulr31
# qhasm: int64 mulr41
# qhasm: int64 mulrax
# qhasm: int64 mulrdx
# qhasm: int64 mult
# qhasm: int64 mulredmask
# qhasm: stack64 mulx219_stack
# qhasm: stack64 mulx319_stack
# qhasm: stack64 mulx419_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp
# qhasm: c1_stack = c1
# asm 1: movq <c1=int64#9,>c1_stack=stack64#1
# asm 2: movq <c1=%r11,>c1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: c2_stack = c2
# asm 1: movq <c2=int64#10,>c2_stack=stack64#2
# asm 2: movq <c2=%r12,>c2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: c3_stack = c3
# asm 1: movq <c3=int64#11,>c3_stack=stack64#3
# asm 2: movq <c3=%r13,>c3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: c4_stack = c4
# asm 1: movq <c4=int64#12,>c4_stack=stack64#4
# asm 2: movq <c4=%r14,>c4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: c5_stack = c5
# asm 1: movq <c5=int64#13,>c5_stack=stack64#5
# asm 2: movq <c5=%r15,>c5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: c6_stack = c6
# asm 1: movq <c6=int64#14,>c6_stack=stack64#6
# asm 2: movq <c6=%rbx,>c6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: c7_stack = c7
# asm 1: movq <c7=int64#15,>c7_stack=stack64#7
# asm 2: movq <c7=%rbp,>c7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: rp_stack = rp
# asm 1: movq <rp=int64#1,>rp_stack=stack64#8
# asm 2: movq <rp=%rdi,>rp_stack=56(%rsp)
movq %rdi,56(%rsp)
# qhasm: yp = yp
# asm 1: mov <yp=int64#3,>yp=int64#4
# asm 2: mov <yp=%rdx,>yp=%rcx
mov %rdx,%rcx
# qhasm: mulrax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>mulrax=int64#3
# asm 2: movq 24(<xp=%rsi),>mulrax=%rdx
movq 24(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#9
# asm 2: movq <mulrax=%rax,>mulx319_stack=64(%rsp)
movq %rax,64(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: r0 = mulrax
# asm 1: mov <mulrax=int64#7,>r0=int64#5
# asm 2: mov <mulrax=%rax,>r0=%r8
mov %rax,%r8
# qhasm: mulr01 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr01=int64#6
# asm 2: mov <mulrdx=%rdx,>mulr01=%r9
mov %rdx,%r9
# qhasm: mulrax = *(uint64 *)(xp + 32)
# asm 1: movq 32(<xp=int64#2),>mulrax=int64#3
# asm 2: movq 32(<xp=%rsi),>mulrax=%rdx
movq 32(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#10
# asm 2: movq <mulrax=%rax,>mulx419_stack=72(%rsp)
movq %rax,72(%rsp)
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r0 += mulrax
# asm 1: add <mulrax=int64#7,<r0=int64#5
# asm 2: add <mulrax=%rax,<r0=%r8
add %rax,%r8
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr01=%r9
adc %rdx,%r9
# qhasm: mulrax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<xp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r0 += mulrax
# asm 1: add <mulrax=int64#7,<r0=int64#5
# asm 2: add <mulrax=%rax,<r0=%r8
add %rax,%r8
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr01=%r9
adc %rdx,%r9
# qhasm: mulrax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<xp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: r1 = mulrax
# asm 1: mov <mulrax=int64#7,>r1=int64#8
# asm 2: mov <mulrax=%rax,>r1=%r10
mov %rax,%r10
# qhasm: mulr11 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr11=int64#9
# asm 2: mov <mulrdx=%rdx,>mulr11=%r11
mov %rdx,%r11
# qhasm: mulrax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<xp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: r2 = mulrax
# asm 1: mov <mulrax=int64#7,>r2=int64#10
# asm 2: mov <mulrax=%rax,>r2=%r12
mov %rax,%r12
# qhasm: mulr21 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr21=int64#11
# asm 2: mov <mulrdx=%rdx,>mulr21=%r13
mov %rdx,%r13
# qhasm: mulrax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<xp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: r3 = mulrax
# asm 1: mov <mulrax=int64#7,>r3=int64#12
# asm 2: mov <mulrax=%rax,>r3=%r14
mov %rax,%r14
# qhasm: mulr31 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr31=int64#13
# asm 2: mov <mulrdx=%rdx,>mulr31=%r15
mov %rdx,%r15
# qhasm: mulrax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 0(<xp=%rsi),>mulrax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32)
# asm 1: mulq 32(<yp=int64#4)
# asm 2: mulq 32(<yp=%rcx)
mulq 32(%rcx)
# qhasm: r4 = mulrax
# asm 1: mov <mulrax=int64#7,>r4=int64#14
# asm 2: mov <mulrax=%rax,>r4=%rbx
mov %rax,%rbx
# qhasm: mulr41 = mulrdx
# asm 1: mov <mulrdx=int64#3,>mulr41=int64#15
# asm 2: mov <mulrdx=%rdx,>mulr41=%rbp
mov %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<xp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r1 += mulrax
# asm 1: add <mulrax=int64#7,<r1=int64#8
# asm 2: add <mulrax=%rax,<r1=%r10
add %rax,%r10
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr11=%r11
adc %rdx,%r11
# qhasm: mulrax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<xp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r2 += mulrax
# asm 1: add <mulrax=int64#7,<r2=int64#10
# asm 2: add <mulrax=%rax,<r2=%r12
add %rax,%r12
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr21=%r13
adc %rdx,%r13
# qhasm: mulrax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<xp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#12
# asm 2: add <mulrax=%rax,<r3=%r14
add %rax,%r14
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 8(<xp=%rsi),>mulrax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r4 += mulrax
# asm 1: add <mulrax=int64#7,<r4=int64#14
# asm 2: add <mulrax=%rax,<r4=%rbx
add %rax,%rbx
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#15
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>mulrax=int64#3
# asm 2: movq 8(<xp=%rsi),>mulrax=%rdx
movq 8(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32)
# asm 1: mulq 32(<yp=int64#4)
# asm 2: mulq 32(<yp=%rcx)
mulq 32(%rcx)
# qhasm: carry? r0 += mulrax
# asm 1: add <mulrax=int64#7,<r0=int64#5
# asm 2: add <mulrax=%rax,<r0=%r8
add %rax,%r8
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr01=%r9
adc %rdx,%r9
# qhasm: mulrax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<xp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r2 += mulrax
# asm 1: add <mulrax=int64#7,<r2=int64#10
# asm 2: add <mulrax=%rax,<r2=%r12
add %rax,%r12
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr21=%r13
adc %rdx,%r13
# qhasm: mulrax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<xp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#12
# asm 2: add <mulrax=%rax,<r3=%r14
add %rax,%r14
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 16(<xp=%rsi),>mulrax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r4 += mulrax
# asm 1: add <mulrax=int64#7,<r4=int64#14
# asm 2: add <mulrax=%rax,<r4=%rbx
add %rax,%rbx
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#15
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbp
adc %rdx,%rbp
# qhasm: mulrax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<xp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r0 += mulrax
# asm 1: add <mulrax=int64#7,<r0=int64#5
# asm 2: add <mulrax=%rax,<r0=%r8
add %rax,%r8
# qhasm: mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr01=%r9
adc %rdx,%r9
# qhasm: mulrax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>mulrax=int64#3
# asm 2: movq 16(<xp=%rsi),>mulrax=%rdx
movq 16(%rsi),%rdx
# qhasm: mulrax *= 19
# asm 1: imulq $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq $19,<mulrax=%rdx,>mulrax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32)
# asm 1: mulq 32(<yp=int64#4)
# asm 2: mulq 32(<yp=%rcx)
mulq 32(%rcx)
# qhasm: carry? r1 += mulrax
# asm 1: add <mulrax=int64#7,<r1=int64#8
# asm 2: add <mulrax=%rax,<r1=%r10
add %rax,%r10
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr11=%r11
adc %rdx,%r11
# qhasm: mulrax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<xp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#12
# asm 2: add <mulrax=%rax,<r3=%r14
add %rax,%r14
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulrax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 24(<xp=%rsi),>mulrax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r4 += mulrax
# asm 1: add <mulrax=int64#7,<r4=int64#14
# asm 2: add <mulrax=%rax,<r4=%rbx
add %rax,%rbx
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#15
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbp
adc %rdx,%rbp
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx319_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r1 += mulrax
# asm 1: add <mulrax=int64#7,<r1=int64#8
# asm 2: add <mulrax=%rax,<r1=%r10
add %rax,%r10
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr11=%r11
adc %rdx,%r11
# qhasm: mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#9,>mulrax=int64#7
# asm 2: movq <mulx319_stack=64(%rsp),>mulrax=%rax
movq 64(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32)
# asm 1: mulq 32(<yp=int64#4)
# asm 2: mulq 32(<yp=%rcx)
mulq 32(%rcx)
# qhasm: carry? r2 += mulrax
# asm 1: add <mulrax=int64#7,<r2=int64#10
# asm 2: add <mulrax=%rax,<r2=%r12
add %rax,%r12
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr21=%r13
adc %rdx,%r13
# qhasm: mulrax = *(uint64 *)(xp + 32)
# asm 1: movq 32(<xp=int64#2),>mulrax=int64#7
# asm 2: movq 32(<xp=%rsi),>mulrax=%rax
movq 32(%rsi),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r4 += mulrax
# asm 1: add <mulrax=int64#7,<r4=int64#14
# asm 2: add <mulrax=%rax,<r4=%rbx
add %rax,%rbx
# qhasm: mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#15
# asm 2: adc <mulrdx=%rdx,<mulr41=%rbp
adc %rdx,%rbp
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#10,>mulrax=int64#7
# asm 2: movq <mulx419_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r1 += mulrax
# asm 1: add <mulrax=int64#7,<r1=int64#8
# asm 2: add <mulrax=%rax,<r1=%r10
add %rax,%r10
# qhasm: mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr11=%r11
adc %rdx,%r11
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#10,>mulrax=int64#7
# asm 2: movq <mulx419_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r2 += mulrax
# asm 1: add <mulrax=int64#7,<r2=int64#10
# asm 2: add <mulrax=%rax,<r2=%r12
add %rax,%r12
# qhasm: mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr21=%r13
adc %rdx,%r13
# qhasm: mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#10,>mulrax=int64#7
# asm 2: movq <mulx419_stack=72(%rsp),>mulrax=%rax
movq 72(%rsp),%rax
# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32)
# asm 1: mulq 32(<yp=int64#4)
# asm 2: mulq 32(<yp=%rcx)
mulq 32(%rcx)
# qhasm: carry? r3 += mulrax
# asm 1: add <mulrax=int64#7,<r3=int64#12
# asm 2: add <mulrax=%rax,<r3=%r14
add %rax,%r14
# qhasm: mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr31=%r15
adc %rdx,%r15
# qhasm: mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: mulr01 = (mulr01.r0) << 13
# asm 1: shld $13,<r0=int64#5,<mulr01=int64#6
# asm 2: shld $13,<r0=%r8,<mulr01=%r9
shld $13,%r8,%r9
# qhasm: r0 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r0=int64#5
# asm 2: and <mulredmask=%rsi,<r0=%r8
and %rsi,%r8
# qhasm: mulr11 = (mulr11.r1) << 13
# asm 1: shld $13,<r1=int64#8,<mulr11=int64#9
# asm 2: shld $13,<r1=%r10,<mulr11=%r11
shld $13,%r10,%r11
# qhasm: r1 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r1=int64#8
# asm 2: and <mulredmask=%rsi,<r1=%r10
and %rsi,%r10
# qhasm: r1 += mulr01
# asm 1: add <mulr01=int64#6,<r1=int64#8
# asm 2: add <mulr01=%r9,<r1=%r10
add %r9,%r10
# qhasm: mulr21 = (mulr21.r2) << 13
# asm 1: shld $13,<r2=int64#10,<mulr21=int64#11
# asm 2: shld $13,<r2=%r12,<mulr21=%r13
shld $13,%r12,%r13
# qhasm: r2 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r2=int64#10
# asm 2: and <mulredmask=%rsi,<r2=%r12
and %rsi,%r12
# qhasm: r2 += mulr11
# asm 1: add <mulr11=int64#9,<r2=int64#10
# asm 2: add <mulr11=%r11,<r2=%r12
add %r11,%r12
# qhasm: mulr31 = (mulr31.r3) << 13
# asm 1: shld $13,<r3=int64#12,<mulr31=int64#13
# asm 2: shld $13,<r3=%r14,<mulr31=%r15
shld $13,%r14,%r15
# qhasm: r3 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r3=int64#12
# asm 2: and <mulredmask=%rsi,<r3=%r14
and %rsi,%r14
# qhasm: r3 += mulr21
# asm 1: add <mulr21=int64#11,<r3=int64#12
# asm 2: add <mulr21=%r13,<r3=%r14
add %r13,%r14
# qhasm: mulr41 = (mulr41.r4) << 13
# asm 1: shld $13,<r4=int64#14,<mulr41=int64#15
# asm 2: shld $13,<r4=%rbx,<mulr41=%rbp
shld $13,%rbx,%rbp
# qhasm: r4 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r4=int64#14
# asm 2: and <mulredmask=%rsi,<r4=%rbx
and %rsi,%rbx
# qhasm: r4 += mulr31
# asm 1: add <mulr31=int64#13,<r4=int64#14
# asm 2: add <mulr31=%r15,<r4=%rbx
add %r15,%rbx
# qhasm: mulr41 = mulr41 * 19
# asm 1: imulq $19,<mulr41=int64#15,>mulr41=int64#3
# asm 2: imulq $19,<mulr41=%rbp,>mulr41=%rdx
imulq $19,%rbp,%rdx
# qhasm: r0 += mulr41
# asm 1: add <mulr41=int64#3,<r0=int64#5
# asm 2: add <mulr41=%rdx,<r0=%r8
add %rdx,%r8
# qhasm: mult = r0
# asm 1: mov <r0=int64#5,>mult=int64#3
# asm 2: mov <r0=%r8,>mult=%rdx
mov %r8,%rdx
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: mult += r1
# asm 1: add <r1=int64#8,<mult=int64#3
# asm 2: add <r1=%r10,<mult=%rdx
add %r10,%rdx
# qhasm: r1 = mult
# asm 1: mov <mult=int64#3,>r1=int64#4
# asm 2: mov <mult=%rdx,>r1=%rcx
mov %rdx,%rcx
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: r0 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r0=int64#5
# asm 2: and <mulredmask=%rsi,<r0=%r8
and %rsi,%r8
# qhasm: mult += r2
# asm 1: add <r2=int64#10,<mult=int64#3
# asm 2: add <r2=%r12,<mult=%rdx
add %r12,%rdx
# qhasm: r2 = mult
# asm 1: mov <mult=int64#3,>r2=int64#6
# asm 2: mov <mult=%rdx,>r2=%r9
mov %rdx,%r9
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: r1 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r1=int64#4
# asm 2: and <mulredmask=%rsi,<r1=%rcx
and %rsi,%rcx
# qhasm: mult += r3
# asm 1: add <r3=int64#12,<mult=int64#3
# asm 2: add <r3=%r14,<mult=%rdx
add %r14,%rdx
# qhasm: r3 = mult
# asm 1: mov <mult=int64#3,>r3=int64#7
# asm 2: mov <mult=%rdx,>r3=%rax
mov %rdx,%rax
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: r2 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r2=int64#6
# asm 2: and <mulredmask=%rsi,<r2=%r9
and %rsi,%r9
# qhasm: mult += r4
# asm 1: add <r4=int64#14,<mult=int64#3
# asm 2: add <r4=%rbx,<mult=%rdx
add %rbx,%rdx
# qhasm: r4 = mult
# asm 1: mov <mult=int64#3,>r4=int64#8
# asm 2: mov <mult=%rdx,>r4=%r10
mov %rdx,%r10
# qhasm: (uint64) mult >>= 51
# asm 1: shr $51,<mult=int64#3
# asm 2: shr $51,<mult=%rdx
shr $51,%rdx
# qhasm: r3 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r3=int64#7
# asm 2: and <mulredmask=%rsi,<r3=%rax
and %rsi,%rax
# qhasm: mult *= 19
# asm 1: imulq $19,<mult=int64#3,>mult=int64#3
# asm 2: imulq $19,<mult=%rdx,>mult=%rdx
imulq $19,%rdx,%rdx
# qhasm: r0 += mult
# asm 1: add <mult=int64#3,<r0=int64#5
# asm 2: add <mult=%rdx,<r0=%r8
add %rdx,%r8
# qhasm: r4 &= mulredmask
# asm 1: and <mulredmask=int64#2,<r4=int64#8
# asm 2: and <mulredmask=%rsi,<r4=%r10
and %rsi,%r10
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#5,0(<rp=int64#1)
# asm 2: movq <r0=%r8,0(<rp=%rdi)
movq %r8,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#4,8(<rp=int64#1)
# asm 2: movq <r1=%rcx,8(<rp=%rdi)
movq %rcx,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#7,24(<rp=int64#1)
# asm 2: movq <r3=%rax,24(<rp=%rdi)
movq %rax,24(%rdi)
# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#8,32(<rp=int64#1)
# asm 2: movq <r4=%r10,32(<rp=%rdi)
movq %r10,32(%rdi)
# qhasm: c1 =c1_stack
# asm 1: movq <c1_stack=stack64#1,>c1=int64#9
# asm 2: movq <c1_stack=0(%rsp),>c1=%r11
movq 0(%rsp),%r11
# qhasm: c2 =c2_stack
# asm 1: movq <c2_stack=stack64#2,>c2=int64#10
# asm 2: movq <c2_stack=8(%rsp),>c2=%r12
movq 8(%rsp),%r12
# qhasm: c3 =c3_stack
# asm 1: movq <c3_stack=stack64#3,>c3=int64#11
# asm 2: movq <c3_stack=16(%rsp),>c3=%r13
movq 16(%rsp),%r13
# qhasm: c4 =c4_stack
# asm 1: movq <c4_stack=stack64#4,>c4=int64#12
# asm 2: movq <c4_stack=24(%rsp),>c4=%r14
movq 24(%rsp),%r14
# qhasm: c5 =c5_stack
# asm 1: movq <c5_stack=stack64#5,>c5=int64#13
# asm 2: movq <c5_stack=32(%rsp),>c5=%r15
movq 32(%rsp),%r15
# qhasm: c6 =c6_stack
# asm 1: movq <c6_stack=stack64#6,>c6=int64#14
# asm 2: movq <c6_stack=40(%rsp),>c6=%rbx
movq 40(%rsp),%rbx
# qhasm: c7 =c7_stack
# asm 1: movq <c7_stack=stack64#7,>c7=int64#15
# asm 2: movq <c7_stack=48(%rsp),>c7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,8 @@
#include "fe25519.h"
/* r = -x in GF(2^255-19), computed as (0 - x) via the generic
 * subtraction routine; r may alias x since fe25519_sub reads its
 * inputs before writing the result through a distinct zero operand. */
void fe25519_neg(fe25519 *r, const fe25519 *x)
{
fe25519 zero;
fe25519_setint(&zero, 0);
fe25519_sub(r, &zero, x);
}

View file

@ -0,0 +1,763 @@
# qhasm: int64 rp
# qhasm: int64 n
# qhasm: input rp
# qhasm: input n
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 c4
# qhasm: int64 c5
# qhasm: int64 c6
# qhasm: int64 c7
# qhasm: caller c1
# qhasm: caller c2
# qhasm: caller c3
# qhasm: caller c4
# qhasm: caller c5
# qhasm: caller c6
# qhasm: caller c7
# qhasm: stack64 c1_stack
# qhasm: stack64 c2_stack
# qhasm: stack64 c3_stack
# qhasm: stack64 c4_stack
# qhasm: stack64 c5_stack
# qhasm: stack64 c6_stack
# qhasm: stack64 c7_stack
# qhasm: stack64 x119_stack
# qhasm: stack64 x219_stack
# qhasm: stack64 x319_stack
# qhasm: stack64 x419_stack
# qhasm: int64 squarer01
# qhasm: int64 squarer11
# qhasm: int64 squarer21
# qhasm: int64 squarer31
# qhasm: int64 squarer41
# qhasm: int64 squarerax
# qhasm: int64 squarerdx
# qhasm: int64 squaret
# qhasm: int64 squareredmask
# qhasm: stack64 n_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
#
# fe25519_nsquare(rp, n): square the field element at rp in place, n times.
# Arguments (SysV AMD64): rp = %rdi (pointer to 5 x uint64 radix-2^51 limbs),
# n = %rsi (iteration count; the loop is do-while shaped, so the body runs
# once even for n <= 0 -- presumably callers always pass n >= 1, TODO confirm).
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare:
# Prologue: carve out a 32-byte-aligned scratch frame of at least 64 bytes.
# After these four instructions %r11 holds the exact adjustment subtracted
# from %rsp; it is spilled to 0(%rsp) below so the epilogue can undo it.
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# Spill the ABI callee-saved registers (%r12-%r15, %rbx, %rbp) plus the
# frame-size value in %r11 into the scratch frame; all are restored
# before `ret` (qhasm labels them c1..c7, "caller" registers).
# qhasm: c1_stack = c1
# asm 1: movq <c1=int64#9,>c1_stack=stack64#1
# asm 2: movq <c1=%r11,>c1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: c2_stack = c2
# asm 1: movq <c2=int64#10,>c2_stack=stack64#2
# asm 2: movq <c2=%r12,>c2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: c3_stack = c3
# asm 1: movq <c3=int64#11,>c3_stack=stack64#3
# asm 2: movq <c3=%r13,>c3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: c4_stack = c4
# asm 1: movq <c4=int64#12,>c4_stack=stack64#4
# asm 2: movq <c4=%r14,>c4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: c5_stack = c5
# asm 1: movq <c5=int64#13,>c5_stack=stack64#5
# asm 2: movq <c5=%r15,>c5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: c6_stack = c6
# asm 1: movq <c6=int64#14,>c6_stack=stack64#6
# asm 2: movq <c6=%rbx,>c6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: c7_stack = c7
# asm 1: movq <c7=int64#15,>c7_stack=stack64#7
# asm 2: movq <c7=%rbp,>c7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: loop:
# Top of the n-times squaring loop; %rsi is decremented at the bottom.
._loop:
# --- One squaring pass: accumulate the limb products of r with itself. ---
# Limbs r0..r4 live at rp+0..rp+32 in radix 2^51.  Each 64x64->128 mulq
# result is summed into a (low:rN, high:squarerN1) pair via add/adc.
# Off-diagonal products occur twice, so one factor is doubled (shl $1)
# or pre-scaled by 38 = 2*19; products that wrap past limb 4 are folded
# back with *19 (radix-2^51 arithmetic mod 2^255-19, per the 25519 naming).
# Machine-generated qhasm output -- do not hand-edit instruction order.
# qhasm: squarerax = *(uint64 *)(rp + 0)
# asm 1: movq 0(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 0(<rp=%rdi),>squarerax=%rax
movq 0(%rdi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 0)
# asm 1: mulq 0(<rp=int64#1)
# asm 2: mulq 0(<rp=%rdi)
mulq 0(%rdi)
# qhasm: r0 = squarerax
# asm 1: mov <squarerax=int64#7,>r0=int64#4
# asm 2: mov <squarerax=%rax,>r0=%rcx
mov %rax,%rcx
# qhasm: squarer01 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer01=int64#5
# asm 2: mov <squarerdx=%rdx,>squarer01=%r8
mov %rdx,%r8
# qhasm: squarerax = *(uint64 *)(rp + 0)
# asm 1: movq 0(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 0(<rp=%rdi),>squarerax=%rax
movq 0(%rdi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 8)
# asm 1: mulq 8(<rp=int64#1)
# asm 2: mulq 8(<rp=%rdi)
mulq 8(%rdi)
# qhasm: r1 = squarerax
# asm 1: mov <squarerax=int64#7,>r1=int64#6
# asm 2: mov <squarerax=%rax,>r1=%r9
mov %rax,%r9
# qhasm: squarer11 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer11=int64#8
# asm 2: mov <squarerdx=%rdx,>squarer11=%r10
mov %rdx,%r10
# qhasm: squarerax = *(uint64 *)(rp + 0)
# asm 1: movq 0(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 0(<rp=%rdi),>squarerax=%rax
movq 0(%rdi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 16)
# asm 1: mulq 16(<rp=int64#1)
# asm 2: mulq 16(<rp=%rdi)
mulq 16(%rdi)
# qhasm: r2 = squarerax
# asm 1: mov <squarerax=int64#7,>r2=int64#9
# asm 2: mov <squarerax=%rax,>r2=%r11
mov %rax,%r11
# qhasm: squarer21 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer21=int64#10
# asm 2: mov <squarerdx=%rdx,>squarer21=%r12
mov %rdx,%r12
# qhasm: squarerax = *(uint64 *)(rp + 0)
# asm 1: movq 0(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 0(<rp=%rdi),>squarerax=%rax
movq 0(%rdi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 24)
# asm 1: mulq 24(<rp=int64#1)
# asm 2: mulq 24(<rp=%rdi)
mulq 24(%rdi)
# qhasm: r3 = squarerax
# asm 1: mov <squarerax=int64#7,>r3=int64#11
# asm 2: mov <squarerax=%rax,>r3=%r13
mov %rax,%r13
# qhasm: squarer31 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer31=int64#12
# asm 2: mov <squarerdx=%rdx,>squarer31=%r14
mov %rdx,%r14
# qhasm: squarerax = *(uint64 *)(rp + 0)
# asm 1: movq 0(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 0(<rp=%rdi),>squarerax=%rax
movq 0(%rdi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32)
# asm 1: mulq 32(<rp=int64#1)
# asm 2: mulq 32(<rp=%rdi)
mulq 32(%rdi)
# qhasm: r4 = squarerax
# asm 1: mov <squarerax=int64#7,>r4=int64#13
# asm 2: mov <squarerax=%rax,>r4=%r15
mov %rax,%r15
# qhasm: squarer41 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer41=int64#14
# asm 2: mov <squarerdx=%rdx,>squarer41=%rbx
mov %rdx,%rbx
# qhasm: squarerax = *(uint64 *)(rp + 8)
# asm 1: movq 8(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 8(<rp=%rdi),>squarerax=%rax
movq 8(%rdi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 8)
# asm 1: mulq 8(<rp=int64#1)
# asm 2: mulq 8(<rp=%rdi)
mulq 8(%rdi)
# qhasm: carry? r2 += squarerax
# asm 1: add <squarerax=int64#7,<r2=int64#9
# asm 2: add <squarerax=%rax,<r2=%r11
add %rax,%r11
# qhasm: squarer21 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer21=int64#10
# asm 2: adc <squarerdx=%rdx,<squarer21=%r12
adc %rdx,%r12
# qhasm: squarerax = *(uint64 *)(rp + 8)
# asm 1: movq 8(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 8(<rp=%rdi),>squarerax=%rax
movq 8(%rdi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 16)
# asm 1: mulq 16(<rp=int64#1)
# asm 2: mulq 16(<rp=%rdi)
mulq 16(%rdi)
# qhasm: carry? r3 += squarerax
# asm 1: add <squarerax=int64#7,<r3=int64#11
# asm 2: add <squarerax=%rax,<r3=%r13
add %rax,%r13
# qhasm: squarer31 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer31=int64#12
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squarerax = *(uint64 *)(rp + 8)
# asm 1: movq 8(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 8(<rp=%rdi),>squarerax=%rax
movq 8(%rdi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 24)
# asm 1: mulq 24(<rp=int64#1)
# asm 2: mulq 24(<rp=%rdi)
mulq 24(%rdi)
# qhasm: carry? r4 += squarerax
# asm 1: add <squarerax=int64#7,<r4=int64#13
# asm 2: add <squarerax=%rax,<r4=%r15
add %rax,%r15
# qhasm: squarer41 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer41=int64#14
# asm 2: adc <squarerdx=%rdx,<squarer41=%rbx
adc %rdx,%rbx
# qhasm: squarerax = *(uint64 *)(rp + 8)
# asm 1: movq 8(<rp=int64#1),>squarerax=int64#3
# asm 2: movq 8(<rp=%rdi),>squarerax=%rdx
movq 8(%rdi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32)
# asm 1: mulq 32(<rp=int64#1)
# asm 2: mulq 32(<rp=%rdi)
mulq 32(%rdi)
# qhasm: carry? r0 += squarerax
# asm 1: add <squarerax=int64#7,<r0=int64#4
# asm 2: add <squarerax=%rax,<r0=%rcx
add %rax,%rcx
# qhasm: squarer01 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer01=int64#5
# asm 2: adc <squarerdx=%rdx,<squarer01=%r8
adc %rdx,%r8
# qhasm: squarerax = *(uint64 *)(rp + 16)
# asm 1: movq 16(<rp=int64#1),>squarerax=int64#7
# asm 2: movq 16(<rp=%rdi),>squarerax=%rax
movq 16(%rdi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 16)
# asm 1: mulq 16(<rp=int64#1)
# asm 2: mulq 16(<rp=%rdi)
mulq 16(%rdi)
# qhasm: carry? r4 += squarerax
# asm 1: add <squarerax=int64#7,<r4=int64#13
# asm 2: add <squarerax=%rax,<r4=%r15
add %rax,%r15
# qhasm: squarer41 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer41=int64#14
# asm 2: adc <squarerdx=%rdx,<squarer41=%rbx
adc %rdx,%rbx
# qhasm: squarerax = *(uint64 *)(rp + 16)
# asm 1: movq 16(<rp=int64#1),>squarerax=int64#3
# asm 2: movq 16(<rp=%rdi),>squarerax=%rdx
movq 16(%rdi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 24)
# asm 1: mulq 24(<rp=int64#1)
# asm 2: mulq 24(<rp=%rdi)
mulq 24(%rdi)
# qhasm: carry? r0 += squarerax
# asm 1: add <squarerax=int64#7,<r0=int64#4
# asm 2: add <squarerax=%rax,<r0=%rcx
add %rax,%rcx
# qhasm: squarer01 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer01=int64#5
# asm 2: adc <squarerdx=%rdx,<squarer01=%r8
adc %rdx,%r8
# qhasm: squarerax = *(uint64 *)(rp + 16)
# asm 1: movq 16(<rp=int64#1),>squarerax=int64#3
# asm 2: movq 16(<rp=%rdi),>squarerax=%rdx
movq 16(%rdi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32)
# asm 1: mulq 32(<rp=int64#1)
# asm 2: mulq 32(<rp=%rdi)
mulq 32(%rdi)
# qhasm: carry? r1 += squarerax
# asm 1: add <squarerax=int64#7,<r1=int64#6
# asm 2: add <squarerax=%rax,<r1=%r9
add %rax,%r9
# qhasm: squarer11 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer11=int64#8
# asm 2: adc <squarerdx=%rdx,<squarer11=%r10
adc %rdx,%r10
# qhasm: squarerax = *(uint64 *)(rp + 24)
# asm 1: movq 24(<rp=int64#1),>squarerax=int64#3
# asm 2: movq 24(<rp=%rdi),>squarerax=%rdx
movq 24(%rdi),%rdx
# qhasm: squarerax *= 19
# asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 24)
# asm 1: mulq 24(<rp=int64#1)
# asm 2: mulq 24(<rp=%rdi)
mulq 24(%rdi)
# qhasm: carry? r1 += squarerax
# asm 1: add <squarerax=int64#7,<r1=int64#6
# asm 2: add <squarerax=%rax,<r1=%r9
add %rax,%r9
# qhasm: squarer11 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer11=int64#8
# asm 2: adc <squarerdx=%rdx,<squarer11=%r10
adc %rdx,%r10
# qhasm: squarerax = *(uint64 *)(rp + 24)
# asm 1: movq 24(<rp=int64#1),>squarerax=int64#3
# asm 2: movq 24(<rp=%rdi),>squarerax=%rdx
movq 24(%rdi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32)
# asm 1: mulq 32(<rp=int64#1)
# asm 2: mulq 32(<rp=%rdi)
mulq 32(%rdi)
# qhasm: carry? r2 += squarerax
# asm 1: add <squarerax=int64#7,<r2=int64#9
# asm 2: add <squarerax=%rax,<r2=%r11
add %rax,%r11
# qhasm: squarer21 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer21=int64#10
# asm 2: adc <squarerdx=%rdx,<squarer21=%r12
adc %rdx,%r12
# qhasm: squarerax = *(uint64 *)(rp + 32)
# asm 1: movq 32(<rp=int64#1),>squarerax=int64#3
# asm 2: movq 32(<rp=%rdi),>squarerax=%rdx
movq 32(%rdi),%rdx
# qhasm: squarerax *= 19
# asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32)
# asm 1: mulq 32(<rp=int64#1)
# asm 2: mulq 32(<rp=%rdi)
mulq 32(%rdi)
# qhasm: carry? r3 += squarerax
# asm 1: add <squarerax=int64#7,<r3=int64#11
# asm 2: add <squarerax=%rax,<r3=%r13
add %rax,%r13
# qhasm: squarer31 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer31=int64#12
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),>squareredmask=%rdx
# NOTE(review): load via RIP-relative addressing.  The original absolute
# form (movq sym,%rdx) emits an R_X86_64_32S relocation, which linkers
# reject when building position-independent executables (PIE is the
# default on modern toolchains).  RIP-relative performs the identical
# 8-byte load of REDMASK51 and assembles in both PIE and non-PIE builds.
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51(%rip),%rdx
# --- Reduce the five 128-bit sums back to 51-bit limbs and store. ---
# Each `shld $13` folds the high word of a (rN, squarerN1) pair down
# (128-bit value shifted right by 51 = low word masked, high word shifted
# left by 64-51 = 13 and added to the next limb); squareredmask is
# REDMASK51 -- presumably 2^51 - 1, matching the `shr $51` carry steps
# below (TODO confirm against the constants file).  The overflow out of
# limb 4 is multiplied by 19 and folded into limb 0, then one linear
# carry chain propagates limb 0 -> 4 with a final *19 wrap, and the
# (partially reduced) result is stored back to rp+0..rp+32.
# Machine-generated qhasm output -- do not hand-edit instruction order.
# qhasm: squarer01 = (squarer01.r0) << 13
# asm 1: shld $13,<r0=int64#4,<squarer01=int64#5
# asm 2: shld $13,<r0=%rcx,<squarer01=%r8
shld $13,%rcx,%r8
# qhasm: r0 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r0=int64#4
# asm 2: and <squareredmask=%rdx,<r0=%rcx
and %rdx,%rcx
# qhasm: squarer11 = (squarer11.r1) << 13
# asm 1: shld $13,<r1=int64#6,<squarer11=int64#8
# asm 2: shld $13,<r1=%r9,<squarer11=%r10
shld $13,%r9,%r10
# qhasm: r1 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r1=int64#6
# asm 2: and <squareredmask=%rdx,<r1=%r9
and %rdx,%r9
# qhasm: r1 += squarer01
# asm 1: add <squarer01=int64#5,<r1=int64#6
# asm 2: add <squarer01=%r8,<r1=%r9
add %r8,%r9
# qhasm: squarer21 = (squarer21.r2) << 13
# asm 1: shld $13,<r2=int64#9,<squarer21=int64#10
# asm 2: shld $13,<r2=%r11,<squarer21=%r12
shld $13,%r11,%r12
# qhasm: r2 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r2=int64#9
# asm 2: and <squareredmask=%rdx,<r2=%r11
and %rdx,%r11
# qhasm: r2 += squarer11
# asm 1: add <squarer11=int64#8,<r2=int64#9
# asm 2: add <squarer11=%r10,<r2=%r11
add %r10,%r11
# qhasm: squarer31 = (squarer31.r3) << 13
# asm 1: shld $13,<r3=int64#11,<squarer31=int64#12
# asm 2: shld $13,<r3=%r13,<squarer31=%r14
shld $13,%r13,%r14
# qhasm: r3 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r3=int64#11
# asm 2: and <squareredmask=%rdx,<r3=%r13
and %rdx,%r13
# qhasm: r3 += squarer21
# asm 1: add <squarer21=int64#10,<r3=int64#11
# asm 2: add <squarer21=%r12,<r3=%r13
add %r12,%r13
# qhasm: squarer41 = (squarer41.r4) << 13
# asm 1: shld $13,<r4=int64#13,<squarer41=int64#14
# asm 2: shld $13,<r4=%r15,<squarer41=%rbx
shld $13,%r15,%rbx
# qhasm: r4 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r4=int64#13
# asm 2: and <squareredmask=%rdx,<r4=%r15
and %rdx,%r15
# qhasm: r4 += squarer31
# asm 1: add <squarer31=int64#12,<r4=int64#13
# asm 2: add <squarer31=%r14,<r4=%r15
add %r14,%r15
# qhasm: squarer41 = squarer41 * 19
# asm 1: imulq $19,<squarer41=int64#14,>squarer41=int64#5
# asm 2: imulq $19,<squarer41=%rbx,>squarer41=%r8
imulq $19,%rbx,%r8
# qhasm: r0 += squarer41
# asm 1: add <squarer41=int64#5,<r0=int64#4
# asm 2: add <squarer41=%r8,<r0=%rcx
add %r8,%rcx
# qhasm: squaret = r0
# asm 1: mov <r0=int64#4,>squaret=int64#5
# asm 2: mov <r0=%rcx,>squaret=%r8
mov %rcx,%r8
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#5
# asm 2: shr $51,<squaret=%r8
shr $51,%r8
# qhasm: squaret += r1
# asm 1: add <r1=int64#6,<squaret=int64#5
# asm 2: add <r1=%r9,<squaret=%r8
add %r9,%r8
# qhasm: r0 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r0=int64#4
# asm 2: and <squareredmask=%rdx,<r0=%rcx
and %rdx,%rcx
# qhasm: r1 = squaret
# asm 1: mov <squaret=int64#5,>r1=int64#6
# asm 2: mov <squaret=%r8,>r1=%r9
mov %r8,%r9
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#5
# asm 2: shr $51,<squaret=%r8
shr $51,%r8
# qhasm: squaret += r2
# asm 1: add <r2=int64#9,<squaret=int64#5
# asm 2: add <r2=%r11,<squaret=%r8
add %r11,%r8
# qhasm: r1 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r1=int64#6
# asm 2: and <squareredmask=%rdx,<r1=%r9
and %rdx,%r9
# qhasm: r2 = squaret
# asm 1: mov <squaret=int64#5,>r2=int64#7
# asm 2: mov <squaret=%r8,>r2=%rax
mov %r8,%rax
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#5
# asm 2: shr $51,<squaret=%r8
shr $51,%r8
# qhasm: squaret += r3
# asm 1: add <r3=int64#11,<squaret=int64#5
# asm 2: add <r3=%r13,<squaret=%r8
add %r13,%r8
# qhasm: r2 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r2=int64#7
# asm 2: and <squareredmask=%rdx,<r2=%rax
and %rdx,%rax
# qhasm: r3 = squaret
# asm 1: mov <squaret=int64#5,>r3=int64#8
# asm 2: mov <squaret=%r8,>r3=%r10
mov %r8,%r10
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#5
# asm 2: shr $51,<squaret=%r8
shr $51,%r8
# qhasm: squaret += r4
# asm 1: add <r4=int64#13,<squaret=int64#5
# asm 2: add <r4=%r15,<squaret=%r8
add %r15,%r8
# qhasm: r3 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r3=int64#8
# asm 2: and <squareredmask=%rdx,<r3=%r10
and %rdx,%r10
# qhasm: r4 = squaret
# asm 1: mov <squaret=int64#5,>r4=int64#9
# asm 2: mov <squaret=%r8,>r4=%r11
mov %r8,%r11
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#5
# asm 2: shr $51,<squaret=%r8
shr $51,%r8
# qhasm: squaret *= 19
# asm 1: imulq $19,<squaret=int64#5,>squaret=int64#5
# asm 2: imulq $19,<squaret=%r8,>squaret=%r8
imulq $19,%r8,%r8
# qhasm: r0 += squaret
# asm 1: add <squaret=int64#5,<r0=int64#4
# asm 2: add <squaret=%r8,<r0=%rcx
add %r8,%rcx
# qhasm: r4 &= squareredmask
# asm 1: and <squareredmask=int64#3,<r4=int64#9
# asm 2: and <squareredmask=%rdx,<r4=%r11
and %rdx,%r11
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#6,8(<rp=int64#1)
# asm 2: movq <r1=%r9,8(<rp=%rdi)
movq %r9,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#7,16(<rp=int64#1)
# asm 2: movq <r2=%rax,16(<rp=%rdi)
movq %rax,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#8,24(<rp=int64#1)
# asm 2: movq <r3=%r10,24(<rp=%rdi)
movq %r10,24(%rdi)
# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#9,32(<rp=int64#1)
# asm 2: movq <r4=%r11,32(<rp=%rdi)
movq %r11,32(%rdi)
# Loop control: do-while on the iteration count in %rsi.  `sub` sets the
# flags; `jg` repeats while the decremented count is still positive, so
# the squaring body above always executes at least once.
# qhasm: signed>? n -= 1
# asm 1: sub $1,<n=int64#2
# asm 2: sub $1,<n=%rsi
sub $1,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto loop if signed>
jg ._loop
# Epilogue: restore the callee-saved registers spilled in the prologue.
# Note 0(%rsp) holds the frame-size value, reloaded into %r11 and used
# by `add %r11,%rsp` below to undo the prologue's stack adjustment.
# qhasm: c1 =c1_stack
# asm 1: movq <c1_stack=stack64#1,>c1=int64#9
# asm 2: movq <c1_stack=0(%rsp),>c1=%r11
movq 0(%rsp),%r11
# qhasm: c2 =c2_stack
# asm 1: movq <c2_stack=stack64#2,>c2=int64#10
# asm 2: movq <c2_stack=8(%rsp),>c2=%r12
movq 8(%rsp),%r12
# qhasm: c3 =c3_stack
# asm 1: movq <c3_stack=stack64#3,>c3=int64#11
# asm 2: movq <c3_stack=16(%rsp),>c3=%r13
movq 16(%rsp),%r13
# qhasm: c4 =c4_stack
# asm 1: movq <c4_stack=stack64#4,>c4=int64#12
# asm 2: movq <c4_stack=24(%rsp),>c4=%r14
movq 24(%rsp),%r14
# qhasm: c5 =c5_stack
# asm 1: movq <c5_stack=stack64#5,>c5=int64#13
# asm 2: movq <c5_stack=32(%rsp),>c5=%r15
movq 32(%rsp),%r15
# qhasm: c6 =c6_stack
# asm 1: movq <c6_stack=stack64#6,>c6=int64#14
# asm 2: movq <c6_stack=40(%rsp),>c6=%rbx
movq 40(%rsp),%rbx
# qhasm: c7 =c7_stack
# asm 1: movq <c7_stack=stack64#7,>c7=int64#15
# asm 2: movq <c7_stack=48(%rsp),>c7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
# Standard qhasm "leave": pop the frame and return (the two movs are
# qhasm's fixed return-value convention, harmless for a void function).
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,49 @@
#include "fe25519.h"
/* Assumes input x being reduced below 2^255 */
/* Serialize x to 32 little-endian bytes.
 * The five 51-bit limbs are first frozen to the canonical reduced
 * representative; the 255-bit value is then emitted byte by byte,
 * byte i holding bits [8*i, 8*i+8) of the packed integer. */
void fe25519_pack(unsigned char r[32], const fe25519 *x)
{
  fe25519 t = *x;
  int i;
  fe25519_freeze(&t);
  for (i = 0; i < 32; i++)
  {
    int bit  = 8 * i;        /* absolute bit position of this byte */
    int limb = bit / 51;     /* limb that holds the byte's low bits */
    int off  = bit % 51;     /* offset of those bits inside the limb */
    unsigned long long b = t.v[limb] >> off;
    /* a byte may straddle two limbs; fold in the high part
       (when off is small the shifted bits land above bit 7 and are
       discarded by the cast below) */
    if (limb < 4)
      b |= t.v[limb + 1] << (51 - off);
    r[i] = (unsigned char) b;  /* truncation keeps exactly 8 bits */
  }
}

View file

@ -0,0 +1,54 @@
#include "fe25519.h"
/* r = x^(2^252 - 3) mod 2^255-19, via a fixed square-and-multiply
 * addition chain.  The e<k> temporaries hold the milestones
 * x^(2^k - 1); w is the running scratch value. */
void fe25519_pow2523(fe25519 *r, const fe25519 *x)
{
  fe25519 x2;                      /* x^2  */
  fe25519 x9;                      /* x^9  */
  fe25519 x11;                     /* x^11 */
  fe25519 e5, e10, e20, e50, e100; /* x^(2^k - 1) milestones */
  fe25519 w;

  fe25519_square(&x2, x);          /* x^2  */
  fe25519_square(&w, &x2);         /* x^4  */
  fe25519_square(&w, &w);          /* x^8  */
  fe25519_mul(&x9, &w, x);         /* x^9  */
  fe25519_mul(&x11, &x9, &x2);     /* x^11 */
  fe25519_square(&w, &x11);        /* x^22 */
  fe25519_mul(&e5, &w, &x9);       /* x^31 = x^(2^5 - 1) */
  fe25519_square(&w, &e5);         /* 2^6  - 2^1  */
  fe25519_nsquare(&w, 4);          /* 2^10 - 2^5  */
  fe25519_mul(&e10, &w, &e5);      /* 2^10 - 2^0  */
  fe25519_square(&w, &e10);        /* 2^11 - 2^1  */
  fe25519_nsquare(&w, 9);          /* 2^20 - 2^10 */
  fe25519_mul(&e20, &w, &e10);     /* 2^20 - 2^0  */
  fe25519_square(&w, &e20);        /* 2^21 - 2^1  */
  fe25519_nsquare(&w, 19);         /* 2^40 - 2^20 */
  fe25519_mul(&w, &w, &e20);       /* 2^40 - 2^0  */
  fe25519_square(&w, &w);          /* 2^41 - 2^1  */
  fe25519_nsquare(&w, 9);          /* 2^50 - 2^10 */
  fe25519_mul(&e50, &w, &e10);     /* 2^50 - 2^0  */
  fe25519_square(&w, &e50);        /* 2^51 - 2^1  */
  fe25519_nsquare(&w, 49);         /* 2^100 - 2^50 */
  fe25519_mul(&e100, &w, &e50);    /* 2^100 - 2^0  */
  fe25519_square(&w, &e100);       /* 2^101 - 2^1  */
  fe25519_nsquare(&w, 99);         /* 2^200 - 2^100 */
  fe25519_mul(&w, &w, &e100);      /* 2^200 - 2^0  */
  fe25519_square(&w, &w);          /* 2^201 - 2^1  */
  fe25519_nsquare(&w, 49);         /* 2^250 - 2^50 */
  fe25519_mul(&w, &w, &e50);       /* 2^250 - 2^0  */
  fe25519_square(&w, &w);          /* 2^251 - 2^1  */
  fe25519_square(&w, &w);          /* 2^252 - 2^2  */
  fe25519_mul(r, &w, x);           /* 2^252 - 3    */
}

View file

@ -0,0 +1,10 @@
#include "fe25519.h"
/* Set r to the small constant v: v goes into limb 0, every other
 * limb is cleared. */
void fe25519_setint(fe25519 *r, unsigned int v)
{
  int i;
  r->v[0] = v;
  for (i = 1; i < 5; i++)
    r->v[i] = 0;
}

View file

@ -0,0 +1,749 @@
# fe25519 squaring for the amd64-51-30k batch ed25519 implementation.
# qhasm-generated: the interleaved "# qhasm:" / "# asm 1/2:" comment pairs
# are the original qhasm source line and its register-allocated rendering.
# Computes rp[0..4] = xp[0..4]^2 with limbs carried back into radix-2^51
# (masked with REDMASK51, excess folded in via *19).
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: input rp
# qhasm: input xp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 c4
# qhasm: int64 c5
# qhasm: int64 c6
# qhasm: int64 c7
# qhasm: caller c1
# qhasm: caller c2
# qhasm: caller c3
# qhasm: caller c4
# qhasm: caller c5
# qhasm: caller c6
# qhasm: caller c7
# qhasm: stack64 c1_stack
# qhasm: stack64 c2_stack
# qhasm: stack64 c3_stack
# qhasm: stack64 c4_stack
# qhasm: stack64 c5_stack
# qhasm: stack64 c6_stack
# qhasm: stack64 c7_stack
# qhasm: stack64 x119_stack
# qhasm: stack64 x219_stack
# qhasm: stack64 x319_stack
# qhasm: stack64 x419_stack
# qhasm: int64 squarer01
# qhasm: int64 squarer11
# qhasm: int64 squarer21
# qhasm: int64 squarer31
# qhasm: int64 squarer41
# qhasm: int64 squarerax
# qhasm: int64 squarerdx
# qhasm: int64 squaret
# qhasm: int64 squareredmask
# entry per SysV AMD64 convention: rp in %rdi, xp in %rsi
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square
_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square:
crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square:
# reserve a 32-byte-aligned scratch frame; %r11 holds its size and is
# spilled to slot 0 below so the epilogue can pop the same amount
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# spill the callee-saved registers (rbx, rbp, r12-r15) to the frame
# qhasm: c1_stack = c1
# asm 1: movq <c1=int64#9,>c1_stack=stack64#1
# asm 2: movq <c1=%r11,>c1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: c2_stack = c2
# asm 1: movq <c2=int64#10,>c2_stack=stack64#2
# asm 2: movq <c2=%r12,>c2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: c3_stack = c3
# asm 1: movq <c3=int64#11,>c3_stack=stack64#3
# asm 2: movq <c3=%r13,>c3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: c4_stack = c4
# asm 1: movq <c4=int64#12,>c4_stack=stack64#4
# asm 2: movq <c4=%r14,>c4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: c5_stack = c5
# asm 1: movq <c5=int64#13,>c5_stack=stack64#5
# asm 2: movq <c5=%r15,>c5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: c6_stack = c6
# asm 1: movq <c6=int64#14,>c6_stack=stack64#6
# asm 2: movq <c6=%rbx,>c6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: c7_stack = c7
# asm 1: movq <c7=int64#15,>c7_stack=stack64#7
# asm 2: movq <c7=%rbp,>c7_stack=48(%rsp)
movq %rbp,48(%rsp)
# schoolbook squaring: accumulate the five 128-bit columns of x^2 in
# (r0,squarer01) .. (r4,squarer41); off-diagonal products are doubled
# via shl $1, and terms above limb 4 are folded down with *19 / *38 (=2*19)
# qhasm: squarerax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 0(<xp=%rsi),>squarerax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0)
# asm 1: mulq 0(<xp=int64#2)
# asm 2: mulq 0(<xp=%rsi)
mulq 0(%rsi)
# qhasm: r0 = squarerax
# asm 1: mov <squarerax=int64#7,>r0=int64#4
# asm 2: mov <squarerax=%rax,>r0=%rcx
mov %rax,%rcx
# qhasm: squarer01 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer01=int64#5
# asm 2: mov <squarerdx=%rdx,>squarer01=%r8
mov %rdx,%r8
# qhasm: squarerax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 0(<xp=%rsi),>squarerax=%rax
movq 0(%rsi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8)
# asm 1: mulq 8(<xp=int64#2)
# asm 2: mulq 8(<xp=%rsi)
mulq 8(%rsi)
# qhasm: r1 = squarerax
# asm 1: mov <squarerax=int64#7,>r1=int64#6
# asm 2: mov <squarerax=%rax,>r1=%r9
mov %rax,%r9
# qhasm: squarer11 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer11=int64#8
# asm 2: mov <squarerdx=%rdx,>squarer11=%r10
mov %rdx,%r10
# qhasm: squarerax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 0(<xp=%rsi),>squarerax=%rax
movq 0(%rsi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16)
# asm 1: mulq 16(<xp=int64#2)
# asm 2: mulq 16(<xp=%rsi)
mulq 16(%rsi)
# qhasm: r2 = squarerax
# asm 1: mov <squarerax=int64#7,>r2=int64#9
# asm 2: mov <squarerax=%rax,>r2=%r11
mov %rax,%r11
# qhasm: squarer21 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer21=int64#10
# asm 2: mov <squarerdx=%rdx,>squarer21=%r12
mov %rdx,%r12
# qhasm: squarerax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 0(<xp=%rsi),>squarerax=%rax
movq 0(%rsi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24)
# asm 1: mulq 24(<xp=int64#2)
# asm 2: mulq 24(<xp=%rsi)
mulq 24(%rsi)
# qhasm: r3 = squarerax
# asm 1: mov <squarerax=int64#7,>r3=int64#11
# asm 2: mov <squarerax=%rax,>r3=%r13
mov %rax,%r13
# qhasm: squarer31 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer31=int64#12
# asm 2: mov <squarerdx=%rdx,>squarer31=%r14
mov %rdx,%r14
# qhasm: squarerax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 0(<xp=%rsi),>squarerax=%rax
movq 0(%rsi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32)
# asm 1: mulq 32(<xp=int64#2)
# asm 2: mulq 32(<xp=%rsi)
mulq 32(%rsi)
# qhasm: r4 = squarerax
# asm 1: mov <squarerax=int64#7,>r4=int64#13
# asm 2: mov <squarerax=%rax,>r4=%r15
mov %rax,%r15
# qhasm: squarer41 = squarerdx
# asm 1: mov <squarerdx=int64#3,>squarer41=int64#14
# asm 2: mov <squarerdx=%rdx,>squarer41=%rbx
mov %rdx,%rbx
# qhasm: squarerax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 8(<xp=%rsi),>squarerax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8)
# asm 1: mulq 8(<xp=int64#2)
# asm 2: mulq 8(<xp=%rsi)
mulq 8(%rsi)
# qhasm: carry? r2 += squarerax
# asm 1: add <squarerax=int64#7,<r2=int64#9
# asm 2: add <squarerax=%rax,<r2=%r11
add %rax,%r11
# qhasm: squarer21 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer21=int64#10
# asm 2: adc <squarerdx=%rdx,<squarer21=%r12
adc %rdx,%r12
# qhasm: squarerax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 8(<xp=%rsi),>squarerax=%rax
movq 8(%rsi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16)
# asm 1: mulq 16(<xp=int64#2)
# asm 2: mulq 16(<xp=%rsi)
mulq 16(%rsi)
# qhasm: carry? r3 += squarerax
# asm 1: add <squarerax=int64#7,<r3=int64#11
# asm 2: add <squarerax=%rax,<r3=%r13
add %rax,%r13
# qhasm: squarer31 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer31=int64#12
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# qhasm: squarerax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 8(<xp=%rsi),>squarerax=%rax
movq 8(%rsi),%rax
# qhasm: squarerax <<= 1
# asm 1: shl $1,<squarerax=int64#7
# asm 2: shl $1,<squarerax=%rax
shl $1,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24)
# asm 1: mulq 24(<xp=int64#2)
# asm 2: mulq 24(<xp=%rsi)
mulq 24(%rsi)
# qhasm: carry? r4 += squarerax
# asm 1: add <squarerax=int64#7,<r4=int64#13
# asm 2: add <squarerax=%rax,<r4=%r15
add %rax,%r15
# qhasm: squarer41 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer41=int64#14
# asm 2: adc <squarerdx=%rdx,<squarer41=%rbx
adc %rdx,%rbx
# qhasm: squarerax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>squarerax=int64#3
# asm 2: movq 8(<xp=%rsi),>squarerax=%rdx
movq 8(%rsi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32)
# asm 1: mulq 32(<xp=int64#2)
# asm 2: mulq 32(<xp=%rsi)
mulq 32(%rsi)
# qhasm: carry? r0 += squarerax
# asm 1: add <squarerax=int64#7,<r0=int64#4
# asm 2: add <squarerax=%rax,<r0=%rcx
add %rax,%rcx
# qhasm: squarer01 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer01=int64#5
# asm 2: adc <squarerdx=%rdx,<squarer01=%r8
adc %rdx,%r8
# qhasm: squarerax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>squarerax=int64#7
# asm 2: movq 16(<xp=%rsi),>squarerax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16)
# asm 1: mulq 16(<xp=int64#2)
# asm 2: mulq 16(<xp=%rsi)
mulq 16(%rsi)
# qhasm: carry? r4 += squarerax
# asm 1: add <squarerax=int64#7,<r4=int64#13
# asm 2: add <squarerax=%rax,<r4=%r15
add %rax,%r15
# qhasm: squarer41 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer41=int64#14
# asm 2: adc <squarerdx=%rdx,<squarer41=%rbx
adc %rdx,%rbx
# qhasm: squarerax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>squarerax=int64#3
# asm 2: movq 16(<xp=%rsi),>squarerax=%rdx
movq 16(%rsi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24)
# asm 1: mulq 24(<xp=int64#2)
# asm 2: mulq 24(<xp=%rsi)
mulq 24(%rsi)
# qhasm: carry? r0 += squarerax
# asm 1: add <squarerax=int64#7,<r0=int64#4
# asm 2: add <squarerax=%rax,<r0=%rcx
add %rax,%rcx
# qhasm: squarer01 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer01=int64#5
# asm 2: adc <squarerdx=%rdx,<squarer01=%r8
adc %rdx,%r8
# qhasm: squarerax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>squarerax=int64#3
# asm 2: movq 16(<xp=%rsi),>squarerax=%rdx
movq 16(%rsi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32)
# asm 1: mulq 32(<xp=int64#2)
# asm 2: mulq 32(<xp=%rsi)
mulq 32(%rsi)
# qhasm: carry? r1 += squarerax
# asm 1: add <squarerax=int64#7,<r1=int64#6
# asm 2: add <squarerax=%rax,<r1=%r9
add %rax,%r9
# qhasm: squarer11 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer11=int64#8
# asm 2: adc <squarerdx=%rdx,<squarer11=%r10
adc %rdx,%r10
# qhasm: squarerax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>squarerax=int64#3
# asm 2: movq 24(<xp=%rsi),>squarerax=%rdx
movq 24(%rsi),%rdx
# qhasm: squarerax *= 19
# asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24)
# asm 1: mulq 24(<xp=int64#2)
# asm 2: mulq 24(<xp=%rsi)
mulq 24(%rsi)
# qhasm: carry? r1 += squarerax
# asm 1: add <squarerax=int64#7,<r1=int64#6
# asm 2: add <squarerax=%rax,<r1=%r9
add %rax,%r9
# qhasm: squarer11 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer11=int64#8
# asm 2: adc <squarerdx=%rdx,<squarer11=%r10
adc %rdx,%r10
# qhasm: squarerax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>squarerax=int64#3
# asm 2: movq 24(<xp=%rsi),>squarerax=%rdx
movq 24(%rsi),%rdx
# qhasm: squarerax *= 38
# asm 1: imulq $38,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $38,<squarerax=%rdx,>squarerax=%rax
imulq $38,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32)
# asm 1: mulq 32(<xp=int64#2)
# asm 2: mulq 32(<xp=%rsi)
mulq 32(%rsi)
# qhasm: carry? r2 += squarerax
# asm 1: add <squarerax=int64#7,<r2=int64#9
# asm 2: add <squarerax=%rax,<r2=%r11
add %rax,%r11
# qhasm: squarer21 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer21=int64#10
# asm 2: adc <squarerdx=%rdx,<squarer21=%r12
adc %rdx,%r12
# qhasm: squarerax = *(uint64 *)(xp + 32)
# asm 1: movq 32(<xp=int64#2),>squarerax=int64#3
# asm 2: movq 32(<xp=%rsi),>squarerax=%rdx
movq 32(%rsi),%rdx
# qhasm: squarerax *= 19
# asm 1: imulq $19,<squarerax=int64#3,>squarerax=int64#7
# asm 2: imulq $19,<squarerax=%rdx,>squarerax=%rax
imulq $19,%rdx,%rax
# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32)
# asm 1: mulq 32(<xp=int64#2)
# asm 2: mulq 32(<xp=%rsi)
mulq 32(%rsi)
# qhasm: carry? r3 += squarerax
# asm 1: add <squarerax=int64#7,<r3=int64#11
# asm 2: add <squarerax=%rax,<r3=%r13
add %rax,%r13
# qhasm: squarer31 += squarerdx + carry
# asm 1: adc <squarerdx=int64#3,<squarer31=int64#12
# asm 2: adc <squarerdx=%rdx,<squarer31=%r14
adc %rdx,%r14
# reduction: shift each 128-bit column's high half down by 13 (51+13=64),
# mask the low halves to 51 bits, then run a sequential carry chain so
# every output limb ends up below 2^51 (top carry folded in via *19)
# qhasm: squareredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=int64#2
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rsi
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
# qhasm: squarer01 = (squarer01.r0) << 13
# asm 1: shld $13,<r0=int64#4,<squarer01=int64#5
# asm 2: shld $13,<r0=%rcx,<squarer01=%r8
shld $13,%rcx,%r8
# qhasm: r0 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r0=int64#4
# asm 2: and <squareredmask=%rsi,<r0=%rcx
and %rsi,%rcx
# qhasm: squarer11 = (squarer11.r1) << 13
# asm 1: shld $13,<r1=int64#6,<squarer11=int64#8
# asm 2: shld $13,<r1=%r9,<squarer11=%r10
shld $13,%r9,%r10
# qhasm: r1 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r1=int64#6
# asm 2: and <squareredmask=%rsi,<r1=%r9
and %rsi,%r9
# qhasm: r1 += squarer01
# asm 1: add <squarer01=int64#5,<r1=int64#6
# asm 2: add <squarer01=%r8,<r1=%r9
add %r8,%r9
# qhasm: squarer21 = (squarer21.r2) << 13
# asm 1: shld $13,<r2=int64#9,<squarer21=int64#10
# asm 2: shld $13,<r2=%r11,<squarer21=%r12
shld $13,%r11,%r12
# qhasm: r2 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r2=int64#9
# asm 2: and <squareredmask=%rsi,<r2=%r11
and %rsi,%r11
# qhasm: r2 += squarer11
# asm 1: add <squarer11=int64#8,<r2=int64#9
# asm 2: add <squarer11=%r10,<r2=%r11
add %r10,%r11
# qhasm: squarer31 = (squarer31.r3) << 13
# asm 1: shld $13,<r3=int64#11,<squarer31=int64#12
# asm 2: shld $13,<r3=%r13,<squarer31=%r14
shld $13,%r13,%r14
# qhasm: r3 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r3=int64#11
# asm 2: and <squareredmask=%rsi,<r3=%r13
and %rsi,%r13
# qhasm: r3 += squarer21
# asm 1: add <squarer21=int64#10,<r3=int64#11
# asm 2: add <squarer21=%r12,<r3=%r13
add %r12,%r13
# qhasm: squarer41 = (squarer41.r4) << 13
# asm 1: shld $13,<r4=int64#13,<squarer41=int64#14
# asm 2: shld $13,<r4=%r15,<squarer41=%rbx
shld $13,%r15,%rbx
# qhasm: r4 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r4=int64#13
# asm 2: and <squareredmask=%rsi,<r4=%r15
and %rsi,%r15
# qhasm: r4 += squarer31
# asm 1: add <squarer31=int64#12,<r4=int64#13
# asm 2: add <squarer31=%r14,<r4=%r15
add %r14,%r15
# qhasm: squarer41 = squarer41 * 19
# asm 1: imulq $19,<squarer41=int64#14,>squarer41=int64#3
# asm 2: imulq $19,<squarer41=%rbx,>squarer41=%rdx
imulq $19,%rbx,%rdx
# qhasm: r0 += squarer41
# asm 1: add <squarer41=int64#3,<r0=int64#4
# asm 2: add <squarer41=%rdx,<r0=%rcx
add %rdx,%rcx
# qhasm: squaret = r0
# asm 1: mov <r0=int64#4,>squaret=int64#3
# asm 2: mov <r0=%rcx,>squaret=%rdx
mov %rcx,%rdx
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#3
# asm 2: shr $51,<squaret=%rdx
shr $51,%rdx
# qhasm: squaret += r1
# asm 1: add <r1=int64#6,<squaret=int64#3
# asm 2: add <r1=%r9,<squaret=%rdx
add %r9,%rdx
# qhasm: r0 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r0=int64#4
# asm 2: and <squareredmask=%rsi,<r0=%rcx
and %rsi,%rcx
# qhasm: r1 = squaret
# asm 1: mov <squaret=int64#3,>r1=int64#5
# asm 2: mov <squaret=%rdx,>r1=%r8
mov %rdx,%r8
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#3
# asm 2: shr $51,<squaret=%rdx
shr $51,%rdx
# qhasm: squaret += r2
# asm 1: add <r2=int64#9,<squaret=int64#3
# asm 2: add <r2=%r11,<squaret=%rdx
add %r11,%rdx
# qhasm: r1 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r1=int64#5
# asm 2: and <squareredmask=%rsi,<r1=%r8
and %rsi,%r8
# qhasm: r2 = squaret
# asm 1: mov <squaret=int64#3,>r2=int64#6
# asm 2: mov <squaret=%rdx,>r2=%r9
mov %rdx,%r9
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#3
# asm 2: shr $51,<squaret=%rdx
shr $51,%rdx
# qhasm: squaret += r3
# asm 1: add <r3=int64#11,<squaret=int64#3
# asm 2: add <r3=%r13,<squaret=%rdx
add %r13,%rdx
# qhasm: r2 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r2=int64#6
# asm 2: and <squareredmask=%rsi,<r2=%r9
and %rsi,%r9
# qhasm: r3 = squaret
# asm 1: mov <squaret=int64#3,>r3=int64#7
# asm 2: mov <squaret=%rdx,>r3=%rax
mov %rdx,%rax
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#3
# asm 2: shr $51,<squaret=%rdx
shr $51,%rdx
# qhasm: squaret += r4
# asm 1: add <r4=int64#13,<squaret=int64#3
# asm 2: add <r4=%r15,<squaret=%rdx
add %r15,%rdx
# qhasm: r3 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r3=int64#7
# asm 2: and <squareredmask=%rsi,<r3=%rax
and %rsi,%rax
# qhasm: r4 = squaret
# asm 1: mov <squaret=int64#3,>r4=int64#8
# asm 2: mov <squaret=%rdx,>r4=%r10
mov %rdx,%r10
# qhasm: (uint64) squaret >>= 51
# asm 1: shr $51,<squaret=int64#3
# asm 2: shr $51,<squaret=%rdx
shr $51,%rdx
# qhasm: squaret *= 19
# asm 1: imulq $19,<squaret=int64#3,>squaret=int64#3
# asm 2: imulq $19,<squaret=%rdx,>squaret=%rdx
imulq $19,%rdx,%rdx
# qhasm: r0 += squaret
# asm 1: add <squaret=int64#3,<r0=int64#4
# asm 2: add <squaret=%rdx,<r0=%rcx
add %rdx,%rcx
# qhasm: r4 &= squareredmask
# asm 1: and <squareredmask=int64#2,<r4=int64#8
# asm 2: and <squareredmask=%rsi,<r4=%r10
and %rsi,%r10
# store the five reduced limbs of the result
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#7,24(<rp=int64#1)
# asm 2: movq <r3=%rax,24(<rp=%rdi)
movq %rax,24(%rdi)
# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#8,32(<rp=int64#1)
# asm 2: movq <r4=%r10,32(<rp=%rdi)
movq %r10,32(%rdi)
# restore the callee-saved registers; %r11 regains the frame size
# qhasm: c1 =c1_stack
# asm 1: movq <c1_stack=stack64#1,>c1=int64#9
# asm 2: movq <c1_stack=0(%rsp),>c1=%r11
movq 0(%rsp),%r11
# qhasm: c2 =c2_stack
# asm 1: movq <c2_stack=stack64#2,>c2=int64#10
# asm 2: movq <c2_stack=8(%rsp),>c2=%r12
movq 8(%rsp),%r12
# qhasm: c3 =c3_stack
# asm 1: movq <c3_stack=stack64#3,>c3=int64#11
# asm 2: movq <c3_stack=16(%rsp),>c3=%r13
movq 16(%rsp),%r13
# qhasm: c4 =c4_stack
# asm 1: movq <c4_stack=stack64#4,>c4=int64#12
# asm 2: movq <c4_stack=24(%rsp),>c4=%r14
movq 24(%rsp),%r14
# qhasm: c5 =c5_stack
# asm 1: movq <c5_stack=stack64#5,>c5=int64#13
# asm 2: movq <c5_stack=32(%rsp),>c5=%r15
movq 32(%rsp),%r15
# qhasm: c6 =c6_stack
# asm 1: movq <c6_stack=stack64#6,>c6=int64#14
# asm 2: movq <c6_stack=40(%rsp),>c6=%rbx
movq 40(%rsp),%rbx
# qhasm: c7 =c7_stack
# asm 1: movq <c7_stack=stack64#7,>c7=int64#15
# asm 2: movq <c7_stack=48(%rsp),>c7=%rbp
movq 48(%rsp),%rbp
# pop the frame; the rax/rdx moves are qhasm's standard leave sequence
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,34 @@
#include "fe25519.h"
/* r = x - y (mod 2^255-19), result left unreduced.
 * y is first carry-reduced so each limb is below 2^51, then the
 * subtraction is biased limb-wise by 2*p so no limb can underflow. */
void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y)
{
  const unsigned long long mask = 2251799813685247ULL; /* 2^51 - 1 */
  fe25519 yr = *y;
  unsigned long long carry;
  int i;

  /* Not required for reduced input: propagate carries through yr */
  for (i = 0; i < 4; i++)
  {
    carry = yr.v[i] >> 51;
    yr.v[i] &= mask;
    yr.v[i + 1] += carry;
  }
  carry = yr.v[4] >> 51;
  yr.v[4] &= mask;
  yr.v[0] += 19 * carry;  /* wrap the top carry: 2^255 == 19 (mod p) */

  /* per-limb bias constants sum to 2*p, keeping every limb >= 0 */
  r->v[0] = x->v[0] + 0xFFFFFFFFFFFDA - yr.v[0];
  for (i = 1; i < 5; i++)
    r->v[i] = x->v[i] + 0xFFFFFFFFFFFFE - yr.v[i];
}

View file

@ -0,0 +1,46 @@
#include "fe25519.h"
/* Parse 32 little-endian bytes into five 51-bit limbs.
 * Byte i supplies bits [8*i, 8*i+8) of the 255-bit integer; the most
 * significant bit of x[31] (bit 255) is discarded. */
void fe25519_unpack(fe25519 *r, const unsigned char x[32])
{
  int i;
  for (i = 0; i < 5; i++)
    r->v[i] = 0;
  for (i = 0; i < 32; i++)
  {
    int bit  = 8 * i;     /* absolute position of this byte        */
    int limb = bit / 51;  /* limb receiving the byte's low bits    */
    int off  = bit % 51;  /* offset inside that limb               */
    int take = 51 - off;  /* bits of this byte that fit in the limb */
    if (take > 8)
      take = 8;
    r->v[limb] += ((unsigned long long)(x[i] & ((1 << take) - 1))) << off;
    /* spill remaining high bits into the next limb; bit 255 is
       dropped because limb 4 has no successor */
    if (take < 8 && limb < 4)
      r->v[limb + 1] += (unsigned long long)(x[i] >> take);
  }
}

View file

@ -0,0 +1,106 @@
#ifndef GE25519_H
#define GE25519_H
/*
 * Arithmetic on the twisted Edwards curve -x^2 + y^2 = 1 + dx^2y^2
 * with d = -(121665/121666) =
 * 37095705934669439343138083508754565189542113879843219016388785533085940283555
 * Base point:
 * (15112221349535400772501151409588531511454012693041857206046113283949847762202,46316835694926478169428394003475163141307993866256225615783033603165251855960);
 */
#include "fe25519.h"
#include "sc25519.h"
/* Namespacing: map the short names used in this code onto the long
 * SUPERCOP-style symbols so multiple ed25519 variants can be linked
 * into one binary without symbol clashes. */
#define ge25519 crypto_sign_ed25519_amd64_51_30k_batch_ge25519
#define ge25519_base crypto_sign_ed25519_amd64_51_30k_batch_ge25519_base
#define ge25519_unpackneg_vartime crypto_sign_ed25519_amd64_51_30k_batch_unpackneg_vartime
#define ge25519_pack crypto_sign_ed25519_amd64_51_30k_batch_pack
#define ge25519_isneutral_vartime crypto_sign_ed25519_amd64_51_30k_batch_isneutral_vartime
#define ge25519_add crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add
#define ge25519_double crypto_sign_ed25519_amd64_51_30k_batch_ge25519_double
#define ge25519_double_scalarmult_vartime crypto_sign_ed25519_amd64_51_30k_batch_double_scalarmult_vartime
#define ge25519_multi_scalarmult_vartime crypto_sign_ed25519_amd64_51_30k_batch_ge25519_multi_scalarmult_vartime
#define ge25519_scalarmult_base crypto_sign_ed25519_amd64_51_30k_batch_scalarmult_base
#define ge25519_p1p1_to_p2 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2
#define ge25519_p1p1_to_p3 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3
#define ge25519_p1p1_to_pniels crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels
#define ge25519_add_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
#define ge25519_dbl_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1
#define choose_t crypto_sign_ed25519_amd64_51_30k_batch_choose_t
#define choose_t_smultq crypto_sign_ed25519_amd64_51_30k_batch_choose_t_smultq
#define ge25519_nielsadd2 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
#define ge25519_nielsadd_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1
#define ge25519_pnielsadd_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1
/* The default ge25519 representation doubles as the "P^3" (extended)
 * form used by the conversion routines below. */
#define ge25519_p3 ge25519
/* Group element in extended ("P^3") coordinates. */
typedef struct
{
  fe25519 x;
  fe25519 y;
  fe25519 z;
  fe25519 t;
} ge25519;
/* "P1P1" (completed) representation produced by the add/dbl kernels.
 * NOTE(review): the field order here is x, z, y, t — do not "fix" it;
 * the asm kernels depend on this layout. */
typedef struct
{
  fe25519 x;
  fe25519 z;
  fe25519 y;
  fe25519 t;
} ge25519_p1p1;
/* Projective ("P^2") representation — no t coordinate. */
typedef struct
{
  fe25519 x;
  fe25519 y;
  fe25519 z;
} ge25519_p2;
/* Precomputed point in Niels form (presumably y-x, y+x, 2*d*t —
 * confirm against the table generator). */
typedef struct
{
  fe25519 ysubx;
  fe25519 xaddy;
  fe25519 t2d;
} ge25519_niels;
/* Projective Niels form: Niels form plus an explicit z coordinate. */
typedef struct
{
  fe25519 ysubx;
  fe25519 xaddy;
  fe25519 z;
  fe25519 t2d;
} ge25519_pniels;
/* Conversions out of the completed (P1P1) representation. */
extern void ge25519_p1p1_to_p2(ge25519_p2 *r, const ge25519_p1p1 *p);
extern void ge25519_p1p1_to_p3(ge25519_p3 *r, const ge25519_p1p1 *p);
extern void ge25519_p1p1_to_pniels(ge25519_pniels *r, const ge25519_p1p1 *p);
/* Core add/double kernels producing completed coordinates. */
extern void ge25519_add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q);
extern void ge25519_dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p);
/* Constant-time table lookups used by the scalar-multiplication code. */
extern void choose_t(ge25519_niels *t, unsigned long long pos, signed long long b, const ge25519_niels *base_multiples);
extern void choose_t_smultq(ge25519_pniels *t, signed long long b, const ge25519_pniels *pre);
/* Mixed additions with precomputed (Niels-form) points. */
extern void ge25519_nielsadd2(ge25519_p3 *r, const ge25519_niels *q);
extern void ge25519_nielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_niels *q);
extern void ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_pniels *q);
/* The curve's base point (see the header comment for its coordinates). */
extern const ge25519 ge25519_base;
/* Point (de)serialization and predicates. */
extern int ge25519_unpackneg_vartime(ge25519 *r, const unsigned char p[32]);
extern void ge25519_pack(unsigned char r[32], const ge25519 *p);
extern int ge25519_isneutral_vartime(const ge25519 *p);
/* High-level group operations and scalar multiplication. */
extern void ge25519_add(ge25519 *r, const ge25519 *p, const ge25519 *q);
extern void ge25519_double(ge25519 *r, const ge25519 *p);
/* computes [s1]p1 + [s2]ge25519_base */
extern void ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const sc25519 *s1, const sc25519 *s2);
extern void ge25519_multi_scalarmult_vartime(ge25519 *r, ge25519 *p, sc25519 *s, const unsigned long long npoints);
extern void ge25519_scalarmult_base(ge25519 *r, const sc25519 *s);
#endif

View file

@ -0,0 +1,8 @@
#include "ge25519.h"
/* r = p + q: add through the completed (P1P1) representation, then
 * convert the sum back to extended (P^3) coordinates. */
void ge25519_add(ge25519_p3 *r, const ge25519_p3 *p, const ge25519_p3 *q)
{
  ge25519_p1p1 sum;
  ge25519_add_p1p1(&sum, p, q);
  ge25519_p1p1_to_p3(r, &sum);
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,7 @@
#include "ge25519.h"
/* Base point in P^3 coordinates (with Z=1) */
/* Limbs are radix-2^51; fields in declaration order are x, y, z, t. */
const ge25519 ge25519_base = {{{0x00062d608f25d51a, 0x000412a4b4f6592a, 0x00075b7171a4b31d, 0x0001ff60527118fe, 0x000216936d3cd6e5}},  /* x */
{{0x0006666666666658, 0x0004cccccccccccc, 0x0001999999999999, 0x0003333333333333, 0x0006666666666666}},  /* y */
{{0x0000000000000001, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}},  /* z = 1 */
{{0x00068AB3A5B7DDA3, 0x00000EEA2A5EADBB, 0x0002AF8DF483C27E, 0x000332B375274732, 0x00067875F0FD78B7}}}; /* t (extended coordinate; presumably x*y — confirm) */

View file

@ -0,0 +1,768 @@
{{{0x00003905d740913e, 0x0000ba2817d673a2, 0x00023e2827f4e67c, 0x000133d2e0c21a34, 0x00044fd2f9298f81}},
{{0x000493c6f58c3b85, 0x0000df7181c325f7, 0x0000f50b0b3e4cb7, 0x0005329385a44c32, 0x00007cf9d3a33d4b}},
{{0x000515674b6fbb59, 0x00001dd454bd5b77, 0x00055f1be90784fc, 0x00066566ea4e8e64, 0x0004f0ebe1faf16e}}},
{{{0x0001a56042b4d5a8, 0x000189cc159ed153, 0x0005b8deaa3cae04, 0x0002aaf04f11b5d8, 0x0006bb595a669c92}},
{{0x0004e7fc933c71d7, 0x0002cf41feb6b244, 0x0007581c0a7d1a76, 0x0007172d534d32f0, 0x000590c063fa87d2}},
{{0x00047eaadad36802, 0x0002707dc900adc6, 0x00001da09aebcd66, 0x0000dc1de55f0873, 0x00049314f0a165ed}}},
{{{0x00011fe8a4fcd265, 0x0007bcb8374faacc, 0x00052f5af4ef4d4f, 0x0005314098f98d10, 0x0002ab91587555bd}},
{{0x0005b0a84cee9730, 0x00061d10c97155e4, 0x0004059cc8096a10, 0x00047a608da8014f, 0x0007a164e1b9a80f}},
{{0x000211f4f1674834, 0x0002fec5bf12b37e, 0x0005c8a93ae139ac, 0x000510ebef3783ad, 0x000549a04b963bb2}}},
{{{0x0006050a056818bf, 0x00062acc1f5532bf, 0x00028141ccc9fa25, 0x00024d61f471e683, 0x00027933f4c7445a}},
{{0x000351b98efc099f, 0x00068fbfa4a7050e, 0x00042a49959d971b, 0x000393e51a469efd, 0x000680e910321e58}},
{{0x000645ece51426b0, 0x0007adb741f297e3, 0x0003e14b038caf18, 0x00060c7214ba6ac6, 0x00044f079b1b0e64}}},
{{{0x000182c3a447d6ba, 0x00022964e536eff2, 0x000192821f540053, 0x0002f9f19e788e5c, 0x000154a7e73eb1b5}},
{{0x0002bc4408a5bb33, 0x000078ebdda05442, 0x0002ffb112354123, 0x000375ee8df5862d, 0x0002945ccf146e20}},
{{0x0002a179e7d003b3, 0x0001400249afd906, 0x0001b63fcd7dca74, 0x00054c3160ea5087, 0x00068b8ac5938b27}}},
{{{0x000006b67b7d8ca4, 0x000084fa44e72933, 0x0001154ee55d6f8a, 0x0004425d842e7390, 0x00038b64c41ae417}},
{{0x0004eeeb77157131, 0x0001201915f10741, 0x0001669cda6c9c56, 0x00045ec032db346d, 0x00051e57bb6a2cc3}},
{{0x0005ad91689de3a4, 0x00051f1f7226f1f3, 0x00073ee6205d7c90, 0x00004f82855a994f, 0x0007865dfa21354c}}},
{{{0x00072c9aaa3221b1, 0x000267774474f74d, 0x000064b0e9b28085, 0x0003f04ef53b27c9, 0x0001d6edd5d2e531}},
{{0x00025cd0944ea3bf, 0x00075673b81a4d63, 0x000150b925d1c0d4, 0x00013f38d9294114, 0x000461bea69283c9}},
{{0x00028aacab0fda36, 0x000287a6a939042f, 0x0006add5a294a319, 0x00061b9be82dc589, 0x000233cef623a2cb}}},
{{{0x00075dedf39234d9, 0x00001c36ab1f3c54, 0x0000f08fee58f5da, 0x0000e19613a0d637, 0x0003a9024a1320e0}},
{{0x0007596604dd3e8f, 0x0006fc510e058b36, 0x0003670c8db2cc0d, 0x000297d899ce332f, 0x0000915e76061bce}},
{{0x0005e835a834a37e, 0x00034d130afd5bef, 0x00059ecc9a2f8673, 0x0002e11608c29b38, 0x000589eb3d9dbefd}}},
{{{0x0003635449aa515e, 0x0003e178d0475dab, 0x00050b4712a19712, 0x0002dcc2860ff4ad, 0x00030d76d6f03d31}},
{{0x0004dd0e632f9c1d, 0x0002ced12622a5d9, 0x00018de9614742da, 0x00079ca96fdbb5d4, 0x0006dd37d49a00ee}},
{{0x000444172106e4c7, 0x00001251afed2d88, 0x000534fc9bed4f5a, 0x0005d85a39cf5234, 0x00010c697112e864}}},
{{{0x0003c4277dbe5fde, 0x0005a335afd44c92, 0x0000c1164099753e, 0x00070487006fe423, 0x00025e61cabed66f}},
{{0x00062aa08358c805, 0x00046f440848e194, 0x000447b771a8f52b, 0x000377ba3269d31d, 0x00003bf9baf55080}},
{{0x0003e128cc586604, 0x0005968b2e8fc7e2, 0x000049a3d5bd61cf, 0x000116505b1ef6e6, 0x000566d78634586e}}},
{{{0x0007a49f9cc10834, 0x0002b513788a22c6, 0x0005ff4b6ef2395b, 0x0002ec8e5af607bf, 0x00033975bca5ecc3}},
{{0x00054285c65a2fd0, 0x00055e62ccf87420, 0x00046bb961b19044, 0x0001153405712039, 0x00014fba5f34793b}},
{{0x000746166985f7d4, 0x00009939000ae79a, 0x0005844c7964f97a, 0x00013617e1f95b3d, 0x00014829cea83fc5}}},
{{{0x00037b8497dd95c2, 0x00061549d6b4ffe8, 0x000217a22db1d138, 0x0000b9cf062eb09e, 0x0002fd9c71e5f758}},
{{0x00070b2f4e71ecb8, 0x000728148efc643c, 0x0000753e03995b76, 0x0005bf5fb2ab6767, 0x00005fc3bc4535d7}},
{{0x0000b3ae52afdedd, 0x00019da76619e497, 0x0006fa0654d2558e, 0x00078219d25e41d4, 0x000373767475c651}}},
{{{0x000299fd40d1add9, 0x0005f2de9a04e5f7, 0x0007c0eebacc1c59, 0x0004cca1b1f8290a, 0x0001fbea56c3b18f}},
{{0x000095cb14246590, 0x000002d82aa6ac68, 0x000442f183bc4851, 0x0006464f1c0a0644, 0x0006bf5905730907}},
{{0x000778f1e1415b8a, 0x0006f75874efc1f4, 0x00028a694019027f, 0x00052b37a96bdc4d, 0x00002521cf67a635}}},
{{{0x0007ee0b0a9d5294, 0x000381fbeb4cca27, 0x0007841f3a3e639d, 0x000676ea30c3445f, 0x0003fa00a7e71382}},
{{0x00046720772f5ee4, 0x000632c0f359d622, 0x0002b2092ba3e252, 0x000662257c112680, 0x000001753d9f7cd6}},
{{0x0001232d963ddb34, 0x00035692e70b078d, 0x000247ca14777a1f, 0x0006db556be8fcd0, 0x00012b5fe2fa048e}}},
{{{0x0000fbc496fce34d, 0x000575be6b7dae3e, 0x0004a31585cee609, 0x000037e9023930ff, 0x000749b76f96fb12}},
{{0x00037c26ad6f1e92, 0x00046a0971227be5, 0x0004722f0d2d9b4c, 0x0003dc46204ee03a, 0x0006f7e93c20796c}},
{{0x0002f604aea6ae05, 0x000637dc939323eb, 0x0003fdad9b048d47, 0x0000a8b0d4045af7, 0x0000fcec10f01e02}}},
{{{0x000558a649fe1e44, 0x00044635aeefcc89, 0x0001ff434887f2ba, 0x0000f981220e2d44, 0x0004901aa7183c51}},
{{0x0002d29dc4244e45, 0x0006927b1bc147be, 0x0000308534ac0839, 0x0004853664033f41, 0x000413779166feab}},
{{0x0001b7548c1af8f0, 0x0007848c53368116, 0x00001b64e7383de9, 0x000109fbb0587c8f, 0x00041bb887b726d1}}},
{{{0x00007d44744346be, 0x000282b6a564a81d, 0x0004ed80f875236b, 0x0006fbbe1d450c50, 0x0004eb728c12fcdb}},
{{0x00034c597c6691ae, 0x0007a150b6990fc4, 0x00052beb9d922274, 0x00070eed7164861a, 0x0000a871e070c6a9}},
{{0x0001b5994bbc8989, 0x00074b7ba84c0660, 0x00075678f1cdaeb8, 0x00023206b0d6f10c, 0x0003ee7300f2685d}}},
{{{0x000255e49e7dd6b7, 0x00038c2163d59eba, 0x0003861f2a005845, 0x0002e11e4ccbaec9, 0x0001381576297912}},
{{0x00027947841e7518, 0x00032c7388dae87f, 0x000414add3971be9, 0x00001850832f0ef1, 0x0007d47c6a2cfb89}},
{{0x0002d0148ef0d6e0, 0x0003522a8de787fb, 0x0002ee055e74f9d2, 0x00064038f6310813, 0x000148cf58d34c9e}}},
{{{0x000492f67934f027, 0x0007ded0815528d4, 0x00058461511a6612, 0x0005ea2e50de1544, 0x0003ff2fa1ebd5db}},
{{0x00072f7d9ae4756d, 0x0007711e690ffc4a, 0x000582a2355b0d16, 0x0000dccfe885b6b4, 0x000278febad4eaea}},
{{0x0002681f8c933966, 0x0003840521931635, 0x000674f14a308652, 0x0003bd9c88a94890, 0x0004104dd02fe9c6}}},
{{{0x0002bf5e1124422a, 0x000673146756ae56, 0x00014ad99a87e830, 0x0001eaca65b080fd, 0x0002c863b00afaf5}},
{{0x00014e06db096ab8, 0x0001219c89e6b024, 0x000278abd486a2db, 0x000240b292609520, 0x0000165b5a48efca}},
{{0x0000a474a0846a76, 0x000099a5ef981e32, 0x0002a8ae3c4bbfe6, 0x00045c34af14832c, 0x000591b67d9bffec}}},
{{{0x00070d1c80b49bfa, 0x0003d57e7d914625, 0x0003c0722165e545, 0x0005e5b93819e04f, 0x0003de02ec7ca8f7}},
{{0x0001b3719f18b55d, 0x000754318c83d337, 0x00027c17b7919797, 0x000145b084089b61, 0x000489b4f8670301}},
{{0x0002102d3aeb92ef, 0x00068c22d50c3a46, 0x00042ea89385894e, 0x00075f9ebf55f38c, 0x00049f5fbba496cb}}},
{{{0x00049a108a5bcfd4, 0x0006178c8e7d6612, 0x0001f03473710375, 0x00073a49614a6098, 0x0005604a86dcbfa6}},
{{0x0005628c1e9c572e, 0x000598b108e822ab, 0x00055d8fae29361a, 0x0000adc8d1a97b28, 0x00006a1a6c288675}},
{{0x0000d1d47c1764b6, 0x00001c08316a2e51, 0x0002b3db45c95045, 0x0001634f818d300c, 0x00020989e89fe274}}},
{{{0x000777fd3a2dcc7f, 0x000594a9fb124932, 0x00001f8e80ca15f0, 0x000714d13cec3269, 0x0000403ed1d0ca67}},
{{0x0004278b85eaec2e, 0x0000ef59657be2ce, 0x00072fd169588770, 0x0002e9b205260b30, 0x000730b9950f7059}},
{{0x00032d35874ec552, 0x0001f3048df1b929, 0x000300d73b179b23, 0x0006e67be5a37d0b, 0x0005bd7454308303}}},
{{{0x0002d19528b24cc2, 0x0004ac66b8302ff3, 0x000701c8d9fdad51, 0x0006c1b35c5b3727, 0x000133a78007380a}},
{{0x0004932115e7792a, 0x000457b9bbb930b8, 0x00068f5d8b193226, 0x0004164e8f1ed456, 0x0005bb7db123067f}},
{{0x0001f467c6ca62be, 0x0002c4232a5dc12c, 0x0007551dc013b087, 0x0000690c11b03bcd, 0x000740dca6d58f0e}}},
{{{0x0000ee0752cfce4e, 0x000660dd8116fbe9, 0x00055167130fffeb, 0x0001c682b885955c, 0x000161d25fa963ea}},
{{0x00028c570478433c, 0x0001d8502873a463, 0x0007641e7eded49c, 0x0001ecedd54cf571, 0x0002c03f5256c2b0}},
{{0x000718757b53a47d, 0x000619e18b0f2f21, 0x0005fbdfe4c1ec04, 0x0005d798c81ebb92, 0x000699468bdbd96b}}},
{{{0x00072f46f4dafecf, 0x0002948ffadef7a3, 0x00011ecdfdf3bc04, 0x0003c2e98ffeed25, 0x000525219a473905}},
{{0x00053de66aa91948, 0x000045f81a599b1b, 0x0003f7a8bd214193, 0x00071d4da412331a, 0x000293e1c4e6c4a2}},
{{0x0006134b925112e1, 0x0006bb942bb406ed, 0x000070c445c0dde2, 0x000411d822c4d7a3, 0x0005b605c447f032}}},
{{{0x0005805920c47c89, 0x0001924771f9972c, 0x00038bbddf9fc040, 0x0001f7000092b281, 0x00024a76dcea8aeb}},
{{0x0001fec6f0e7f04c, 0x0003cebc692c477d, 0x000077986a19a95e, 0x0006eaaaa1778b0f, 0x0002f12fef4cc5ab}},
{{0x000522b2dfc0c740, 0x0007e8193480e148, 0x00033fd9a04341b9, 0x0003c863678a20bc, 0x0005e607b2518a43}}},
{{{0x00031d8f6cdf1818, 0x0001f86c4b144b16, 0x00039875b8d73e9d, 0x0002fbf0d9ffa7b3, 0x0005067acab6ccdd}},
{{0x0004431ca596cf14, 0x000015da7c801405, 0x00003c9b6f8f10b5, 0x0000346922934017, 0x000201f33139e457}},
{{0x00027f6b08039d51, 0x0004802f8000dfaa, 0x00009692a062c525, 0x0001baea91075817, 0x000397cba8862460}}},
{{{0x00013093f05959b2, 0x0001bd352f2ec618, 0x000075789b88ea86, 0x00061d1117ea48b9, 0x0002339d320766e6}},
{{0x0005c3fbc81379e7, 0x00041bbc255e2f02, 0x0006a3f756998650, 0x0001297fd4e07c42, 0x000771b4022c1e1c}},
{{0x0005d986513a2fa7, 0x00063f3a99e11b0f, 0x00028a0ecfd6b26d, 0x00053b6835e18d8f, 0x000331a189219971}}},
{{{0x00066f45fb4f80c6, 0x0003cc38eeb9fea2, 0x000107647270db1f, 0x000710f1ea740dc8, 0x00031167c6b83bdf}},
{{0x00012f3a9d7572af, 0x00010d00e953c4ca, 0x000603df116f2f8a, 0x00033dc276e0e088, 0x0001ac9619ff649a}},
{{0x00033842524b1068, 0x00077dd39d30fe45, 0x000189432141a0d0, 0x000088fe4eb8c225, 0x000612436341f08b}}},
{{{0x000541db874e898d, 0x00062d80fb841b33, 0x00003e6ef027fa97, 0x0007a03c9e9633e8, 0x00046ebe2309e5ef}},
{{0x000349e31a2d2638, 0x0000137a7fa6b16c, 0x000681ae92777edc, 0x000222bfc5f8dc51, 0x0001522aa3178d90}},
{{0x00002f5369614938, 0x000356e5ada20587, 0x00011bc89f6bf902, 0x000036746419c8db, 0x00045fe70f505243}}},
{{{0x000075a6960c0b8c, 0x0006dde1c5e41b49, 0x00042e3f516da341, 0x00016a03fda8e79e, 0x000428d1623a0e39}},
{{0x00024920c8951491, 0x000107ec61944c5e, 0x00072752e017c01f, 0x000122b7dda2e97a, 0x00016619f6db57a2}},
{{0x00074a4401a308fd, 0x00006ed4b9558109, 0x000746f1f6a08867, 0x0004636f5c6f2321, 0x0001d81592d60bd3}}},
{{{0x00068756a60dac5f, 0x00055d757b8aec26, 0x0003383df45f80bd, 0x0006783f8c9f96a6, 0x00020234a7789ecd}},
{{0x0005b69f7b85c5e8, 0x00017a2d175650ec, 0x0004cc3e6dbfc19e, 0x00073e1d3873be0e, 0x0003a5f6d51b0af8}},
{{0x00020db67178b252, 0x00073aa3da2c0eda, 0x00079045c01c70d3, 0x0001b37b15251059, 0x0007cd682353cffe}}},
{{{0x0001a45bd887fab6, 0x00065748076dc17c, 0x0005b98000aa11a8, 0x0004a1ecc9080974, 0x0002838c8863bdc0}},
{{0x0005cd6068acf4f3, 0x0003079afc7a74cc, 0x00058097650b64b4, 0x00047fabac9c4e99, 0x0003ef0253b2b2cd}},
{{0x0003b0cf4a465030, 0x000022b8aef57a2d, 0x0002ad0677e925ad, 0x0004094167d7457a, 0x00021dcb8a606a82}}},
{{{0x000004468c9d9fc8, 0x0005da8554796b8c, 0x0003b8be70950025, 0x0006d5892da6a609, 0x0000bc3d08194a31}},
{{0x000500fabe7731ba, 0x0007cc53c3113351, 0x0007cf65fe080d81, 0x0003c5d966011ba1, 0x0005d840dbf6c6f6}},
{{0x0006380d309fe18b, 0x0004d73c2cb8ee0d, 0x0006b882adbac0b6, 0x00036eabdddd4cbe, 0x0003a4276232ac19}}},
{{{0x0002432c8a7084fa, 0x00047bf73ca8a968, 0x0001639176262867, 0x0005e8df4f8010ce, 0x0001ff177cea16de}},
{{0x0000c172db447ecb, 0x0003f8c505b7a77f, 0x0006a857f97f3f10, 0x0004fcc0567fe03a, 0x0000770c9e824e1a}},
{{0x0001d99a45b5b5fd, 0x000523674f2499ec, 0x0000f8fa26182613, 0x00058f7398048c98, 0x00039f264fd41500}}},
{{{0x00053417dbe7e29c, 0x00054573827394f5, 0x000565eea6f650dd, 0x00042050748dc749, 0x0001712d73468889}},
{{0x00034aabfe097be1, 0x00043bfc03253a33, 0x00029bc7fe91b7f3, 0x0000a761e4844a16, 0x00065c621272c35f}},
{{0x000389f8ce3193dd, 0x0002d424b8177ce5, 0x000073fa0d3440cd, 0x000139020cd49e97, 0x00022f9800ab19ce}}},
{{{0x0002368a3e9ef8cb, 0x000454aa08e2ac0b, 0x000490923f8fa700, 0x000372aa9ea4582f, 0x00013f416cd64762}},
{{0x00029fdd9a6efdac, 0x0007c694a9282840, 0x0006f7cdeee44b3a, 0x00055a3207b25cc3, 0x0004171a4d38598c}},
{{0x000758aa99c94c8c, 0x0005f6001700ff44, 0x0007694e488c01bd, 0x0000d5fde948eed6, 0x000508214fa574bd}}},
{{{0x000269153ed6fe4b, 0x00072a23aef89840, 0x000052be5299699c, 0x0003a5e5ef132316, 0x00022f960ec6faba}},
{{0x000215bb53d003d6, 0x0001179e792ca8c3, 0x0001a0e96ac840a2, 0x00022393e2bb3ab6, 0x0003a7758a4c86cb}},
{{0x000111f693ae5076, 0x0003e3bfaa94ca90, 0x000445799476b887, 0x00024a0912464879, 0x0005d9fd15f8de7f}}},
{{{0x000408d36d63727f, 0x0005faf8f6a66062, 0x0002bb892da8de6b, 0x000769d4f0c7e2e6, 0x000332f35914f8fb}},
{{0x00044d2aeed7521e, 0x00050865d2c2a7e4, 0x0002705b5238ea40, 0x00046c70b25d3b97, 0x0003bc187fa47eb9}},
{{0x00070115ea86c20c, 0x00016d88da24ada8, 0x0001980622662adf, 0x000501ebbc195a9d, 0x000450d81ce906fb}}},
{{{0x0003b6a1a6205275, 0x0002e82791d06dcf, 0x00023d72caa93c87, 0x0005f0b7ab68aaf4, 0x0002de25d4ba6345}},
{{0x0004d8961cae743f, 0x0006bdc38c7dba0e, 0x0007d3b4a7e1b463, 0x0000844bdee2adf3, 0x0004cbad279663ab}},
{{0x00019024a0d71fcd, 0x00015f65115f101a, 0x0004e99067149708, 0x000119d8d1cba5af, 0x0007d7fbcefe2007}}},
{{{0x00071e6a266b2801, 0x00009aae73e2df5d, 0x00040dd8b219b1a3, 0x000546fb4517de0d, 0x0005975435e87b75}},
{{0x00045dc5f3c29094, 0x0003455220b579af, 0x000070c1631e068a, 0x00026bc0630e9b21, 0x0004f9cd196dcd8d}},
{{0x000297d86a7b3768, 0x0004835a2f4c6332, 0x000070305f434160, 0x000183dd014e56ae, 0x0007ccdd084387a0}}},
{{{0x0006422c6d260417, 0x000212904817bb94, 0x0005a319deb854f5, 0x0007a9d4e060da7d, 0x000428bd0ed61d0c}},
{{0x000484186760cc93, 0x0007435665533361, 0x00002f686336b801, 0x0005225446f64331, 0x0003593ca848190c}},
{{0x0003189a5e849aa7, 0x0006acbb1f59b242, 0x0007f6ef4753630c, 0x0001f346292a2da9, 0x00027398308da2d6}}},
{{{0x00038d28435ed413, 0x0004064f19992858, 0x0007680fbef543cd, 0x0001aadd83d58d3c, 0x000269597aebe8c3}},
{{0x00010e4c0a702453, 0x0004daafa37bd734, 0x00049f6bdc3e8961, 0x0001feffdcecdae6, 0x000572c2945492c3}},
{{0x0007c745d6cd30be, 0x00027c7755df78ef, 0x0001776833937fa3, 0x0005405116441855, 0x0007f985498c05bc}}},
{{{0x0001ce889f0be117, 0x00036f6a94510709, 0x0007f248720016b4, 0x0001821ed1e1cf91, 0x00076c2ec470a31f}},
{{0x000615520fbf6363, 0x0000b9e9bf74da6a, 0x0004fe8308201169, 0x000173f76127de43, 0x00030f2653cd69b1}},
{{0x0000c938aac10c85, 0x00041b64ed797141, 0x0001beb1c1185e6d, 0x0001ed5490600f07, 0x0002f1273f159647}}},
{{{0x0001fc7c8ae01e11, 0x0002094d5573e8e7, 0x0005ca3cbbf549d2, 0x0004f920ecc54143, 0x0005d9e572ad85b6}},
{{0x00008bd755a70bc0, 0x00049e3a885ce609, 0x00016585881b5ad6, 0x0003c27568d34f5e, 0x00038ac1997edc5f}},
{{0x0006b517a751b13b, 0x0000cfd370b180cc, 0x0005377925d1f41a, 0x00034e56566008a2, 0x00022dfcd9cbfe9e}}},
{{{0x0003d2e0c30d0cd9, 0x0003f597686671bb, 0x0000aa587eb63999, 0x0000e3c7b592c619, 0x0006b2916c05448c}},
{{0x000459b4103be0a1, 0x00059a4b3f2d2add, 0x0007d734c8bb8eeb, 0x0002393cbe594a09, 0x0000fe9877824cde}},
{{0x000334d10aba913b, 0x000045cdb581cfdb, 0x0005e3e0553a8f36, 0x00050bb3041effb2, 0x0004c303f307ff00}}},
{{{0x00023bd617b28c85, 0x0006e72ee77d5a61, 0x0001a972ff174dde, 0x0003e2636373c60f, 0x0000d61b8f78b2ab}},
{{0x000403580dd94500, 0x00048df77d92653f, 0x00038a9fe3b349ea, 0x0000ea89850aafe1, 0x000416b151ab706a}},
{{0x0000d7efe9c136b0, 0x0001ab1c89640ad5, 0x00055f82aef41f97, 0x00046957f317ed0d, 0x000191a2af74277e}}},
{{{0x0006f74bc53c1431, 0x0001c40e5dbbd9c2, 0x0006c8fb9cae5c97, 0x0004845c5ce1b7da, 0x0007e2e0e450b5cc}},
{{0x00062b434f460efb, 0x000294c6c0fad3fc, 0x00068368937b4c0f, 0x0005c9f82910875b, 0x000237e7dbe00545}},
{{0x000575ed6701b430, 0x0004d3e17fa20026, 0x000791fc888c4253, 0x0002f1ba99078ac1, 0x00071afa699b1115}}},
{{{0x00066f9b3953b61d, 0x000555f4283cccb9, 0x0007dd67fb1960e7, 0x00014707a1affed4, 0x000021142e9c2b1c}},
{{0x00023c1c473b50d6, 0x0003e7671de21d48, 0x000326fa5547a1e8, 0x00050e4dc25fafd9, 0x00000731fbc78f89}},
{{0x0000c71848f81880, 0x00044bd9d8233c86, 0x0006e8578efe5830, 0x0004045b6d7041b5, 0x0004c4d6f3347e15}}},
{{{0x0007eccfc17d1fc9, 0x0004ca280782831e, 0x0007b8337db1d7d6, 0x0005116def3895fb, 0x000193fddaaa7e47}},
{{0x0004ddfc988f1970, 0x0004f6173ea365e1, 0x000645daf9ae4588, 0x0007d43763db623b, 0x00038bf9500a88f9}},
{{0x0002c93c37e8876f, 0x0003431a28c583fa, 0x00049049da8bd879, 0x0004b4a8407ac11c, 0x0006a6fb99ebf0d4}}},
{{{0x0006c1bb560855eb, 0x00071f127e13ad48, 0x0005c6b304905aec, 0x0003756b8e889bc7, 0x00075f76914a3189}},
{{0x000122b5b6e423c6, 0x00021e50dff1ddd6, 0x00073d76324e75c0, 0x000588485495418e, 0x000136fda9f42c5e}},
{{0x0004dfb1a305bdd1, 0x0003b3ff05811f29, 0x0006ed62283cd92e, 0x00065d1543ec52e1, 0x000022183510be8d}}},
{{{0x000766385ead2d14, 0x0000194f8b06095e, 0x00008478f6823b62, 0x0006018689d37308, 0x0006a071ce17b806}},
{{0x0002710143307a7f, 0x0003d88fb48bf3ab, 0x000249eb4ec18f7a, 0x000136115dff295f, 0x0001387c441fd404}},
{{0x0003c3d187978af8, 0x0007afe1c88276ba, 0x00051df281c8ad68, 0x00064906bda4245d, 0x0003171b26aaf1ed}}},
{{{0x0007319097564ca8, 0x0001844ebc233525, 0x00021d4543fdeee1, 0x0001ad27aaff1bd2, 0x000221fd4873cf08}},
{{0x0005b7d8b28a47d1, 0x0002c2ee149e34c1, 0x000776f5629afc53, 0x0001f4ea50fc49a9, 0x0006c514a6334424}},
{{0x0002204f3a156341, 0x000537414065a464, 0x00043c0c3bedcf83, 0x0005557e706ea620, 0x00048daa596fb924}}},
{{{0x00028e665ca59cc7, 0x000165c715940dd9, 0x0000785f3aa11c95, 0x00057b98d7e38469, 0x000676dd6fccad84}},
{{0x00061d5dc84c9793, 0x00047de83040c29e, 0x000189deb26507e7, 0x0004d4e6fadc479a, 0x00058c837fa0e8a7}},
{{0x0001688596fc9058, 0x00066f6ad403619f, 0x0004d759a87772ef, 0x0007856e6173bea4, 0x0001c4f73f2c6a57}}},
{{{0x00024fbd305fa0bb, 0x00040a98cc75a1cf, 0x00078ce1220a7533, 0x0006217a10e1c197, 0x000795ac80d1bf64}},
{{0x0006706efc7c3484, 0x0006987839ec366d, 0x0000731f95cf7f26, 0x0003ae758ebce4bc, 0x00070459adb7daf6}},
{{0x0001db4991b42bb3, 0x000469605b994372, 0x000631e3715c9a58, 0x0007e9cfefcf728f, 0x0005fe162848ce21}}},
{{{0x0001214fe194961a, 0x0000e1ae39a9e9cb, 0x000543c8b526f9f7, 0x000119498067e91d, 0x0004789d446fc917}},
{{0x0001852d5d7cb208, 0x00060d0fbe5ce50f, 0x0005a1e246e37b75, 0x00051aee05ffd590, 0x0002b44c043677da}},
{{0x000487ab074eb78e, 0x0001d33b5e8ce343, 0x00013e419feb1b46, 0x0002721f565de6a4, 0x00060c52eef2bb9a}}},
{{{0x000589bc3bfd8bf1, 0x0006f93e6aa3416b, 0x0004c0a3d6c1ae48, 0x00055587260b586a, 0x00010bc9c312ccfc}},
{{0x0003c5c27cae6d11, 0x00036a9491956e05, 0x000124bac9131da6, 0x0003b6f7de202b5d, 0x00070d77248d9b66}},
{{0x0002e84b3ec2a05b, 0x00069da2f03c1551, 0x00023a174661a67b, 0x000209bca289f238, 0x00063755bd3a976f}}},
{{{0x0007a03e2ad10853, 0x000213dcc6ad36ab, 0x0001a6e240d5bdd6, 0x0007c24ffcf8fedf, 0x0000d8cc1c48bc16}},
{{0x0007101897f1acb7, 0x0003d82cb77b07b8, 0x000684083d7769f5, 0x00052b28472dce07, 0x0002763751737c52}},
{{0x000402d36eb419a9, 0x0007cef68c14a052, 0x0000f1255bc2d139, 0x000373e7d431186a, 0x00070c2dd8a7ad16}}},
{{{0x000194509f6fec0e, 0x000528d8ca31acac, 0x0007826d73b8b9fa, 0x00024acb99e0f9b3, 0x0002e0fac6363948}},
{{0x0004967db8ed7e13, 0x00015aeed02f523a, 0x0006149591d094bc, 0x000672f204c17006, 0x00032b8613816a53}},
{{0x0007f7bee448cd64, 0x0004e10f10da0f3c, 0x0003936cb9ab20e9, 0x0007a0fc4fea6cd0, 0x0004179215c735a4}}},
{{{0x000094e7d7dced2a, 0x000068fa738e118e, 0x00041b640a5fee2b, 0x0006bb709df019d4, 0x000700344a30cd99}},
{{0x000633b9286bcd34, 0x0006cab3badb9c95, 0x00074e387edfbdfa, 0x00014313c58a0fd9, 0x00031fa85662241c}},
{{0x00026c422e3622f4, 0x0000f3066a05b5f0, 0x0004e2448f0480a6, 0x000244cde0dbf095, 0x00024bb2312a9952}}},
{{{0x0000ed1732de67c3, 0x000308c369291635, 0x00033ef348f2d250, 0x000004475ea1a1bb, 0x0000fee3e871e188}},
{{0x00000c2af5f85c6b, 0x0000609f4cf2883f, 0x0006e86eb5a1ca13, 0x00068b44a2efccd1, 0x0000d1d2af9ffeb5}},
{{0x00028aa132621edf, 0x00042b244caf353b, 0x00066b064cc2e08a, 0x0006bb20020cbdd3, 0x00016acd79718531}}},
{{{0x000772af2d9b1d3d, 0x0006d486448b4e5b, 0x0002ce58dd8d18a8, 0x0001849f67503c8b, 0x000123e0ef6b9302}},
{{0x0001c6c57887b6ad, 0x0005abf21fd7592b, 0x00050bd41253867a, 0x0003800b71273151, 0x000164ed34b18161}},
{{0x0006d94c192fe69a, 0x0005475222a2690f, 0x000693789d86b8b3, 0x0001f5c3bdfb69dc, 0x00078da0fc61073f}}},
{{{0x00015d28e52bc66a, 0x00030e1e0351cb7e, 0x00030a2f74b11f8c, 0x00039d120cd7de03, 0x0002d25deeb256b1}},
{{0x000780f1680c3a94, 0x0002a35d3cfcd453, 0x000005e5cdc7ddf8, 0x0006ee888078ac24, 0x000054aa4b316b38}},
{{0x0000468d19267cb8, 0x00038cdca9b5fbf9, 0x0001bbb05c2ca1e2, 0x0003b015758e9533, 0x000134610a6ab7da}}},
{{{0x00038ec78df6b0fe, 0x00013caebea36a22, 0x0005ebc6e54e5f6a, 0x00032804903d0eb8, 0x0002102fdba2b20d}},
{{0x000265e777d1f515, 0x0000f1f54c1e39a5, 0x0002f01b95522646, 0x0004fdd8db9dde6d, 0x000654878cba97cc}},
{{0x0006e405055ce6a1, 0x0005024a35a532d3, 0x0001f69054daf29d, 0x00015d1d0d7a8bd5, 0x0000ad725db29ecb}}},
{{{0x000267b1834e2457, 0x0006ae19c378bb88, 0x0007457b5ed9d512, 0x0003280d783d05fb, 0x0004aefcffb71a03}},
{{0x0007bc0c9b056f85, 0x00051cfebffaffd8, 0x00044abbe94df549, 0x0007ecbbd7e33121, 0x0004f675f5302399}},
{{0x000536360415171e, 0x0002313309077865, 0x000251444334afbc, 0x0002b0c3853756e8, 0x0000bccbb72a2a86}}},
{{{0x0006962feab1a9c8, 0x0006aca28fb9a30b, 0x00056db7ca1b9f98, 0x00039f58497018dd, 0x0004024f0ab59d6b}},
{{0x00055e4c50fe1296, 0x00005fdd13efc30d, 0x0001c0c6c380e5ee, 0x0003e11de3fb62a8, 0x0006678fd69108f3}},
{{0x0006fa31636863c2, 0x00010ae5a67e42b0, 0x00027abbf01fda31, 0x000380a7b9e64fbc, 0x0002d42e2108ead4}}},
{{{0x0005131594dfd29b, 0x0003a627e98d52fe, 0x0001154041855661, 0x00019175d09f8384, 0x000676b2608b8d2d}},
{{0x00017b0d0f537593, 0x00016263c0c9842e, 0x0004ab827e4539a4, 0x0006370ddb43d73a, 0x000420bf3a79b423}},
{{0x0000ba651c5b2b47, 0x0005862363701027, 0x0000c4d6c219c6db, 0x0000f03dff8658de, 0x000745d2ffa9c0cf}}},
{{{0x00025a1e2bc9c8bd, 0x000104c8f3b037ea, 0x000405576fa96c98, 0x0002e86a88e3876f, 0x0001ae23ceb960cf}},
{{0x0006df5721d34e6a, 0x0004f32f767a0c06, 0x0001d5abeac76e20, 0x00041ce9e104e1e4, 0x00006e15be54c1dc}},
{{0x00025d871932994a, 0x0006b9d63b560b6e, 0x0002df2814c8d472, 0x0000fbbee20aa4ed, 0x00058ded861278ec}}},
{{{0x00073793f266c55c, 0x0000b988a9c93b02, 0x00009b0ea32325db, 0x00037cae71c17c5e, 0x0002ff39de85485f}},
{{0x00035ba8b6c2c9a8, 0x0001dea58b3185bf, 0x0004b455cd23bbbe, 0x0005ec19c04883f8, 0x00008ba696b531d5}},
{{0x00053eeec3efc57a, 0x0002fa9fe9022efd, 0x000699c72c138154, 0x00072a751ebd1ff8, 0x000120633b4947cf}}},
{{{0x0004987891610042, 0x00079d9d7f5d0172, 0x0003c293013b9ec4, 0x0000c2b85f39caca, 0x00035d30a99b4d59}},
{{0x000531474912100a, 0x0005afcdf7c0d057, 0x0007a9e71b788ded, 0x0005ef708f3b0c88, 0x00007433be3cb393}},
{{0x000144c05ce997f4, 0x0004960b8a347fef, 0x0001da11f15d74f7, 0x00054fac19c0fead, 0x0002d873ede7af6d}}},
{{{0x0002316443373409, 0x0005de95503b22af, 0x000699201beae2df, 0x0003db5849ff737a, 0x0002e773654707fa}},
{{0x000202e14e5df981, 0x0002ea02bc3eb54c, 0x00038875b2883564, 0x0001298c513ae9dd, 0x0000543618a01600}},
{{0x0002bdf4974c23c1, 0x0004b3b9c8d261bd, 0x00026ae8b2a9bc28, 0x0003068210165c51, 0x0004b1443362d079}}},
{{{0x0004b7c7b66e1f7a, 0x0004bea185efd998, 0x0004fabc711055f8, 0x0001fb9f7836fe38, 0x000582f446752da6}},
{{0x000454e91c529ccb, 0x00024c98c6bf72cf, 0x0000486594c3d89a, 0x0007ae13a3d7fa3c, 0x00017038418eaf66}},
{{0x00017bd320324ce4, 0x00051489117898c6, 0x0001684d92a0410b, 0x0006e4d90f78c5a7, 0x0000c2a1c4bcda28}}},
{{{0x0005c7d06f1f0447, 0x0007db70f80b3a49, 0x0006cb4a3ec89a78, 0x00043be8ad81397d, 0x0007c558bd1c6f64}},
{{0x0004814869bd6945, 0x0007b7c391a45db8, 0x00057316ac35b641, 0x000641e31de9096a, 0x0005a6a9b30a314d}},
{{0x00041524d396463d, 0x0001586b449e1a1d, 0x0002f17e904aed8a, 0x0007e1d2861d3c8e, 0x0000404a5ca0afba}}},
{{{0x000740070aa743d6, 0x00016b64cbdd1183, 0x00023f4b7b32eb43, 0x000319aba58235b3, 0x00046395bfdcadd9}},
{{0x00049e1b2a416fd1, 0x00051c6a0b316c57, 0x000575a59ed71bdc, 0x00074c021a1fec1e, 0x00039527516e7f8e}},
{{0x0007db2d1a5d9a9c, 0x00079a200b85422f, 0x000355bfaa71dd16, 0x00000b77ea5f78aa, 0x00076579a29e822d}}},
{{{0x00068e7e49c02a17, 0x00045795346fe8b6, 0x000089306c8f3546, 0x0006d89f6b2f88f6, 0x00043a384dc9e05b}},
{{0x0004b51352b434f2, 0x0001327bd01c2667, 0x000434d73b60c8a1, 0x0003e0daa89443ba, 0x00002c514bb2a277}},
{{0x0003d5da8bf1b645, 0x0007ded6a96a6d09, 0x0006c3494fee2f4d, 0x00002c989c8b6bd4, 0x0001160920961548}}},
{{{0x0005166929dacfaa, 0x000190826b31f689, 0x0004f55567694a7d, 0x000705f4f7b1e522, 0x000351e125bc5698}},
{{0x00005616369b4dcd, 0x0004ecab86ac6f47, 0x0003c60085d700b2, 0x0000213ee10dfcea, 0x0002f637d7491e6e}},
{{0x00049b461af67bbe, 0x00075915712c3a96, 0x00069a67ef580c0d, 0x00054d38ef70cffc, 0x0007f182d06e7ce2}}},
{{{0x00048e64ab0168ec, 0x0002a2bdb8a86f4f, 0x0007343b6b2d6929, 0x0001d804aa8ce9a3, 0x00067d4ac8c343e9}},
{{0x00054b728e217522, 0x00069a90971b0128, 0x00051a40f2a963a3, 0x00010be9ac12a6bf, 0x00044acc043241c5}},
{{0x00056bbb4f7a5777, 0x00029230627c238f, 0x0005ad1a122cd7fb, 0x0000dea56e50e364, 0x000556d1c8312ad7}}},
{{{0x000740e30c8d3982, 0x0007c2b47f4682fd, 0x0005cd91b8c7dc1c, 0x00077fa790f9e583, 0x000746c6c6d1d824}},
{{0x00006756b11be821, 0x000462147e7bb03e, 0x00026519743ebfe0, 0x000782fc59682ab5, 0x000097abe38cc8c7}},
{{0x0001c9877ea52da4, 0x0002b37b83a86189, 0x000733af49310da5, 0x00025e81161c04fb, 0x000577e14a34bee8}}},
{{{0x000268ac61a73b0a, 0x000206f234bebe1c, 0x0005b403a7cbebe8, 0x0007a160f09f4135, 0x00060fa7ee96fd78}},
{{0x0006cebebd4dd72b, 0x000340c1e442329f, 0x00032347ffd1a93f, 0x00014a89252cbbe0, 0x000705304b8fb009}},
{{0x00051d354d296ec6, 0x0007cbf5a63b16c7, 0x0002f50bb3cf0c14, 0x0001feb385cac65a, 0x00021398e0ca1635}}},
{{{0x0005058a382b33f3, 0x000175a91816913e, 0x0004f6cdb96b8ae8, 0x00017347c9da81d2, 0x0005aa3ed9d95a23}},
{{0x0000aaf9b4b75601, 0x00026b91b5ae44f3, 0x0006de808d7ab1c8, 0x0006a769675530b0, 0x0001bbfb284e98f7}},
{{0x000777e9c7d96561, 0x00028e58f006ccac, 0x000541bbbb2cac49, 0x0003e63282994cec, 0x0004a07e14e5e895}}},
{{{0x000412cb980df999, 0x0005e78dd8ee29dc, 0x000171dff68c575d, 0x0002015dd2f6ef49, 0x0003f0bac391d313}},
{{0x000358cdc477a49b, 0x0003cc88fe02e481, 0x000721aab7f4e36b, 0x0000408cc9469953, 0x00050af7aed84afa}},
{{0x0007de0115f65be5, 0x0004242c21364dc9, 0x0006b75b64a66098, 0x0000033c0102c085, 0x0001921a316baebd}}},
{{{0x00022f7edfb870fc, 0x000569eed677b128, 0x00030937dcb0a5af, 0x000758039c78ea1b, 0x0006458df41e273a}},
{{0x0002ad9ad9f3c18b, 0x0005ec1638339aeb, 0x0005703b6559a83b, 0x0003fa9f4d05d612, 0x0007b049deca062c}},
{{0x0003e37a35444483, 0x000661fdb7d27b99, 0x000317761dd621e4, 0x0007323c30026189, 0x0006093dccbc2950}}},
{{{0x00039a8585e0706d, 0x0003167ce72663fe, 0x00063d14ecdb4297, 0x0004be21dcf970b8, 0x00057d1ea084827a}},
{{0x0006eebe6084034b, 0x0006cf01f70a8d7b, 0x0000b41a54c6670a, 0x0006c84b99bb55db, 0x0006e3180c98b647}},
{{0x0002b6e7a128b071, 0x0005b27511755dcf, 0x00008584c2930565, 0x00068c7bda6f4159, 0x000363e999ddd97b}}},
{{{0x000043c135ee1fc4, 0x0002a11c9919f2d5, 0x0006334cc25dbacd, 0x000295da17b400da, 0x00048ee9b78693a0}},
{{0x000048dce24baec6, 0x0002b75795ec05e3, 0x0003bfa4c5da6dc9, 0x0001aac8659e371e, 0x000231f979bc6f9b}},
{{0x0001de4bcc2af3c6, 0x00061fc411a3eb86, 0x00053ed19ac12ec0, 0x000209dbc6b804e0, 0x000079bfa9b08792}}},
{{{0x00003a51da300df4, 0x000467b52b561c72, 0x0004d5920210e590, 0x0000ca769e789685, 0x000038c77f684817}},
{{0x0001ed80a2d54245, 0x00070efec72a5e79, 0x00042151d42a822d, 0x0001b5ebb6d631e8, 0x0001ef4fb1594706}},
{{0x00065ee65b167bec, 0x000052da19b850a9, 0x0000408665656429, 0x0007ab39596f9a4c, 0x000575ee92a4a0bf}}},
{{{0x000080908a182fcf, 0x0000532913b7ba98, 0x0003dccf78c385c3, 0x00068002dd5eaba9, 0x00043d4e7112cd3f}},
{{0x0006bc450aa4d801, 0x0004f4a6773b0ba8, 0x0006241b0b0ebc48, 0x00040d9c4f1d9315, 0x000200a1e7e382f5}},
{{0x0005b967eaf93ac5, 0x000360acca580a31, 0x0001c65fd5c6f262, 0x00071c7f15c2ecab, 0x000050eca52651e4}}},
{{{0x00031ade453f0c9c, 0x0003dfee07737868, 0x000611ecf7a7d411, 0x0002637e6cbd64f6, 0x0004b0ee6c21c58f}},
{{0x0004397660e668ea, 0x0007c2a75692f2f5, 0x0003b29e7e6c66ef, 0x00072ba658bcda9a, 0x0006151c09fa131a}},
{{0x00055c0dfdf05d96, 0x000405569dcf475e, 0x00005c5c277498bb, 0x00018588d95dc389, 0x0001fef24fa800f0}}},
{{{0x000653fb1aa73196, 0x000607faec8306fa, 0x0004e85ec83e5254, 0x00009f56900584fd, 0x000544d49292fc86}},
{{0x0002aff530976b86, 0x0000d85a48c0845a, 0x000796eb963642e0, 0x00060bee50c4b626, 0x00028005fe6c8340}},
{{0x0007ba9f34528688, 0x000284a20fb42d5d, 0x0003652cd9706ffe, 0x0006fd7baddde6b3, 0x00072e472930f316}}},
{{{0x0005208c9781084f, 0x00016468a1dc24d2, 0x0007bf780ac540a8, 0x0001a67eced75301, 0x0005a9d2e8c2733a}},
{{0x0003f635d32a7627, 0x0000cbecacde00fe, 0x0003411141eaa936, 0x00021c1e42f3cb94, 0x0001fee7f000fe06}},
{{0x000305da03dbf7e5, 0x0001228699b7aeca, 0x00012a23b2936bc9, 0x0002a1bda56ae6e9, 0x00000f94051ee040}}},
{{{0x00056b23c3d330b2, 0x00037608e360d1a6, 0x00010ae0f3c8722e, 0x000086d9b618b637, 0x00007d79c7e8beab}},
{{0x000793bb07af9753, 0x0001e7b6ecd4fafd, 0x00002c7b1560fb43, 0x0002296734cc5fb7, 0x00047b7ffd25dd40}},
{{0x0003fb9cbc08dd12, 0x00075c3dd85370ff, 0x00047f06fe2819ac, 0x0005db06ab9215ed, 0x0001c3520a35ea64}}},
{{{0x000253a6bccba34a, 0x000427070433701a, 0x00020b8e58f9870e, 0x000337c861db00cc, 0x0001c3d05775d0ee}},
{{0x00006f40216bc059, 0x0003a2579b0fd9b5, 0x00071c26407eec8c, 0x00072ada4ab54f0b, 0x00038750c3b66d12}},
{{0x0006f1409422e51a, 0x0007856bbece2d25, 0x00013380a72f031c, 0x00043e1080a7f3ba, 0x0000621e2c7d3304}}},
{{{0x000060cc8259838d, 0x000038d3f35b95f3, 0x00056078c243a923, 0x0002de3293241bb2, 0x0000007d6097bd3a}},
{{0x00061796b0dbf0f3, 0x00073c2f9c32d6f5, 0x0006aa8ed1537ebe, 0x00074e92c91838f4, 0x0005d8e589ca1002}},
{{0x00071d950842a94b, 0x00046b11e5c7d817, 0x0005478bbecb4f0d, 0x0007c3054b0a1c5d, 0x0001583d7783c1cb}}},
{{{0x0006a2ef5da27ae1, 0x00028aace02e9d9d, 0x00002459e965f0e8, 0x0007b864d3150933, 0x000252a5f2e81ed8}},
{{0x00034704cc9d28c7, 0x0003dee598b1f200, 0x00016e1c98746d9e, 0x0004050b7095afdf, 0x0004958064e83c55}},
{{0x000094265066e80d, 0x0000a60f918d61a5, 0x0000444bf7f30fde, 0x0001c40da9ed3c06, 0x000079c170bd843b}}},
{{{0x0006ece464fa6fff, 0x0003cc40bca460a0, 0x0006e3a90afb8d0c, 0x0005801abca11228, 0x0006dec05e34ac9f}},
{{0x0006cd50c0d5d056, 0x0005b7606ae779ba, 0x00070fbd226bdda1, 0x0005661e53391ff9, 0x0006768c0d7317b8}},
{{0x000625e5f155c1b3, 0x0004f32f6f723296, 0x0005ac980105efce, 0x00017a61165eee36, 0x00051445e14ddcd5}}},
{{{0x00002b4b3b144951, 0x0005688977966aea, 0x00018e176e399ffd, 0x0002e45c5eb4938b, 0x00013186f31e3929}},
{{0x000147ab2bbea455, 0x0001f240f2253126, 0x0000c3de9e314e89, 0x00021ea5a4fca45f, 0x00012e990086e4fd}},
{{0x000496b37fdfbb2e, 0x0003c2439d5f3e21, 0x00016e60fe7e6a4d, 0x0004d7ef889b621d, 0x00077b2e3f05d3e9}}},
{{{0x0007a9c59c2ec4de, 0x0007e9f09e79652d, 0x0006a3e422f22d86, 0x0002ae8e3b836c8b, 0x00063b795fc7ad32}},
{{0x0000639c12ddb0a4, 0x0006180490cd7ab3, 0x0003f3918297467c, 0x00074568be1781ac, 0x00007a195152e095}},
{{0x00068f02389e5fc8, 0x000059f1bc877506, 0x000504990e410cec, 0x00009bd7d0feaee2, 0x0003e8fe83d032f0}}},
{{{0x000315b90570a294, 0x00060ce108a925f1, 0x0006eff61253c909, 0x000003ef0e2d70b0, 0x00075ba3b797fac4}},
{{0x00004c8de8efd13c, 0x0001c67c06e6210e, 0x000183378f7f146a, 0x00064352ceaed289, 0x00022d60899a6258}},
{{0x0001dbc070cdd196, 0x00016d8fb1534c47, 0x000500498183fa2a, 0x00072f59c423de75, 0x0000904d07b87779}}},
{{{0x00061fd4ddba919c, 0x0007d8e991b55699, 0x00061b31473cc76c, 0x0007039631e631d6, 0x00043e2143fbc1dd}},
{{0x00022d6648f940b9, 0x000197a5a1873e86, 0x000207e4c41a54bc, 0x0005360b3b4bd6d0, 0x0006240aacebaf72}},
{{0x0004749c5ba295a0, 0x00037946fa4b5f06, 0x000724c5ab5a51f1, 0x00065633789dd3f3, 0x00056bdaf238db40}}},
{{{0x0002b9e3f53533eb, 0x0002add727a806c5, 0x00056955c8ce15a3, 0x00018c4f070a290e, 0x0001d24a86d83741}},
{{0x0000d36cc19d3bb2, 0x0006ec4470d72262, 0x0006853d7018a9ae, 0x0003aa3e4dc2c8eb, 0x00003aa31507e1e5}},
{{0x00047648ffd4ce1f, 0x00060a9591839e9d, 0x000424d5f38117ab, 0x00042cc46912c10e, 0x00043b261dc9aeb4}}},
{{{0x00031e1988bb79bb, 0x0007b82f46b3bcab, 0x0000f7a8ce827b41, 0x0005e15816177130, 0x000326055cf5b276}},
{{0x00013d8b6c951364, 0x0004c0017e8f632a, 0x00053e559e53f9c4, 0x0004b20146886eea, 0x00002b4d5e242940}},
{{0x000155cb28d18df2, 0x0000c30d9ca11694, 0x0002090e27ab3119, 0x000208624e7a49b6, 0x00027a6c809ae5d3}}},
{{{0x0006ebcd1f0db188, 0x00074ceb4b7d1174, 0x0007d56168df4f5c, 0x0000bf79176fd18a, 0x0002cb67174ff60a}},
{{0x0004270ac43d6954, 0x0002ed4cd95659a5, 0x00075c0db37528f9, 0x0002ccbcfd2c9234, 0x000221503603d8c2}},
{{0x0006cdf9390be1d0, 0x00008e519c7e2b3d, 0x000253c3d2a50881, 0x00021b41448e333d, 0x0007b1df4b73890f}}},
{{{0x0002f2e0b3b2a224, 0x0000c56aa22c1c92, 0x0005fdec39f1b278, 0x0004c90af5c7f106, 0x00061fcef2658fc5}},
{{0x0006221807f8f58c, 0x0003fa92813a8be5, 0x0006da98c38d5572, 0x00001ed95554468f, 0x00068698245d352e}},
{{0x00015d852a18187a, 0x000270dbb59afb76, 0x0007db120bcf92ab, 0x0000e7a25d714087, 0x00046cf4c473daf0}}},
{{{0x000525ed9ec4e5f9, 0x000022d20660684c, 0x0007972b70397b68, 0x0007a03958d3f965, 0x00029387bcd14eb5}},
{{0x00046ea7f1498140, 0x00070725690a8427, 0x0000a73ae9f079fb, 0x0002dd924461c62b, 0x0001065aae50d8cc}},
{{0x00044525df200d57, 0x0002d7f94ce94385, 0x00060d00c170ecb7, 0x00038b0503f3d8f0, 0x00069a198e64f1ce}}},
{{{0x0002b2e0d91a78bc, 0x0003990a12ccf20c, 0x000141c2e11f2622, 0x0000dfcefaa53320, 0x0007369e6a92493a}},
{{0x00014434dcc5caed, 0x0002c7909f667c20, 0x00061a839d1fb576, 0x0004f23800cabb76, 0x00025b2697bd267f}},
{{0x00073ffb13986864, 0x0003282bb8f713ac, 0x00049ced78f297ef, 0x0006697027661def, 0x0001420683db54e4}}},
{{{0x0000bd1e249dd197, 0x00000bcb1820568f, 0x0002eab1718830d4, 0x000396fd816997e6, 0x00060b63bebf508a}},
{{0x0006bb6fc1cc5ad0, 0x000532c8d591669d, 0x0001af794da86c33, 0x0000e0e9d86d24d3, 0x00031e83b4161d08}},
{{0x0000c7129e062b4f, 0x0001e526415b12fd, 0x000461a0fd27923d, 0x00018badf670a5b7, 0x00055cf1eb62d550}}},
{{{0x0001101065c23d58, 0x0005aa1290338b0f, 0x0003157e9e2e7421, 0x0000ea712017d489, 0x000669a656457089}},
{{0x0006b5e37df58c52, 0x0003bcf33986c60e, 0x00044fb8835ceae7, 0x000099dec18e71a4, 0x0001a56fbaa62ba0}},
{{0x00066b505c9dc9ec, 0x000774ef86e35287, 0x0004d1d944c0955e, 0x00052e4c39d72b20, 0x00013c4836799c58}}},
{{{0x00025d465ab3e1b9, 0x0000f8fe27ec2847, 0x0002d6e6dbf04f06, 0x0003038cfc1b3276, 0x00066f80c93a637b}},
{{0x0004fb6a5d8bd080, 0x00058ae34908589b, 0x0003954d977baf13, 0x000413ea597441dc, 0x00050bdc87dc8e5b}},
{{0x000537836edfe111, 0x0002be02357b2c0d, 0x0006dcee58c8d4f8, 0x0002d732581d6192, 0x0001dd56444725fd}}},
{{{0x00047ff83362127d, 0x00008e39af82b1f4, 0x000488322ef27dab, 0x0001973738a2a1a4, 0x0000e645912219f7}},
{{0x0007e60008bac89a, 0x00023d5c387c1852, 0x00079e5df1f533a8, 0x0002e6f9f1c5f0cf, 0x0003a3a450f63a30}},
{{0x00072f31d8394627, 0x00007bd294a200f1, 0x000665be00e274c6, 0x00043de8f1b6368b, 0x000318c8d9393a9a}}},
{{{0x00045d032afffe19, 0x00012fe49b6cde4e, 0x00021663bc327cf1, 0x00018a5e4c69f1dd, 0x000224c7c679a1d5}},
{{0x00069e29ab1dd398, 0x00030685b3c76bac, 0x000565cf37f24859, 0x00057b2ac28efef9, 0x000509a41c325950}},
{{0x00006edca6f925e9, 0x00068c8363e677b8, 0x00060cfa25e4fbcf, 0x0001c4c17609404e, 0x00005bff02328a11}}},
{{{0x0002137023cae00b, 0x00015a3599eb26c6, 0x0000687221512b3c, 0x000253cb3a0824e9, 0x000780b8cc3fa2a4}},
{{0x0001a0dd0dc512e4, 0x00010894bf5fcd10, 0x00052949013f9c37, 0x0001f50fba4735c7, 0x000576277cdee01a}},
{{0x00038abc234f305f, 0x0007a280bbc103de, 0x000398a836695dfe, 0x0003d0af41528a1a, 0x0005ff418726271b}}},
{{{0x0006080c1789db9d, 0x0004be7cef1ea731, 0x0002f40d769d8080, 0x00035f7d4c44a603, 0x000106a03dc25a96}},
{{0x000347e813b69540, 0x00076864c21c3cbb, 0x0001e049dbcd74a8, 0x0005b4d60f93749c, 0x00029d4db8ca0a0c}},
{{0x00050aaf333353d0, 0x0004b59a613cbb35, 0x000223dfc0e19a76, 0x00077d1e2bb2c564, 0x0004ab38a51052cb}}},
{{{0x00042b256768d593, 0x0002e88459427b4f, 0x00002b3876630701, 0x00034878d405eae5, 0x00029cdd1adc088a}},
{{0x0007d1ef5fddc09c, 0x0007beeaebb9dad9, 0x000058d30ba0acfb, 0x0005cd92eab5ae90, 0x0003041c6bb04ed2}},
{{0x0002f2f9d956e148, 0x0006b3e6ad65c1fe, 0x0005b00972b79e5d, 0x00053d8d234c5daf, 0x000104bbd6814049}}},
{{{0x0000fd3168f1ed67, 0x0001bb0de7784a3e, 0x00034bcb78b20477, 0x0000a4a26e2e2182, 0x0005be8cc57092a7}},
{{0x00059a5fd67ff163, 0x0003a998ead0352b, 0x000083c95fa4af9a, 0x0006fadbfc01266f, 0x000204f2a20fb072}},
{{0x00043b3d30ebb079, 0x000357aca5c61902, 0x0005b570c5d62455, 0x00030fb29e1e18c7, 0x0002570fb17c2791}}},
{{{0x0002367f2cb61575, 0x0006c39ac04d87df, 0x0006d4958bd7e5bd, 0x000566f4638a1532, 0x0003dcb65ea53030}},
{{0x0006a9550bb8245a, 0x000511f20a1a2325, 0x00029324d7239bee, 0x0003343cc37516c4, 0x000241c5f91de018}},
{{0x0000172940de6caa, 0x0006045b2e67451b, 0x00056c07463efcb3, 0x0000728b6bfe6e91, 0x00008420edd5fcdf}}},
{{{0x000720ab8362fa4a, 0x00029c4347cdd9bf, 0x0000e798ad5f8463, 0x0004fef18bcb0bfe, 0x0000d9a53efbc176}},
{{0x0000c34e04f410ce, 0x000344edc0d0a06b, 0x0006e45486d84d6d, 0x00044e2ecb3863f5, 0x00004d654f321db8}},
{{0x0005c116ddbdb5d5, 0x0006d1b4bba5abcf, 0x0004d28a48a5537a, 0x00056b8e5b040b99, 0x0004a7a4f2618991}}},
{{{0x000718025fb15f95, 0x00068d6b8371fe94, 0x0003804448f7d97c, 0x00042466fe784280, 0x00011b50c4cddd31}},
{{0x0003b291af372a4b, 0x00060e3028fe4498, 0x0002267bca4f6a09, 0x000719eec242b243, 0x0004a96314223e0e}},
{{0x0000274408a4ffd6, 0x0007d382aedb34dd, 0x00040acfc9ce385d, 0x000628bb99a45b1e, 0x0004f4bce4dce6bc}}},
{{{0x0007ce5ae2242584, 0x0002d25eb153d4e3, 0x0003a8f3d09ba9c9, 0x0000f3690d04eb8e, 0x00073fcdd14b71c0}},
{{0x0002616ec49d0b6f, 0x0001f95d8462e61c, 0x0001ad3e9b9159c6, 0x00079ba475a04df9, 0x0003042cee561595}},
{{0x00067079449bac41, 0x0005b79c4621484f, 0x00061069f2156b8d, 0x0000eb26573b10af, 0x000389e740c9a9ce}}},
{{{0x0004b3ae34dcb9ce, 0x00047c691a15ac9f, 0x000318e06e5d400c, 0x0003c422d9f83eb1, 0x00061545379465a6}},
{{0x000578f6570eac28, 0x000644f2339c3937, 0x00066e47b7956c2c, 0x00034832fe1f55d0, 0x00025c425e5d6263}},
{{0x000606a6f1d7de6e, 0x0004f1c0c46107e7, 0x000229b1dcfbe5d8, 0x0003acc60a7b1327, 0x0006539a08915484}}},
{{{0x00021f74c3d2f773, 0x000024b88d08bd3a, 0x0006e678cf054151, 0x00043631272e747c, 0x00011c5e4aac5cd1}},
{{0x0004dbd414bb4a19, 0x0007930849f1dbb8, 0x000329c5a466caf0, 0x0006c824544feb9b, 0x0000f65320ef019b}},
{{0x0006d1b1cafde0c6, 0x000462c76a303a90, 0x0003ca4e693cff9b, 0x0003952cd45786fd, 0x0004cabc7bdec330}}},
{{{0x00069624089c0a2e, 0x0000075fc8e70473, 0x00013e84ab1d2313, 0x0002c10bedf6953b, 0x000639b93f0321c8}},
{{0x0007788f3f78d289, 0x0005942809b3f811, 0x0005973277f8c29c, 0x000010f93bc5fe67, 0x0007ee498165acb2}},
{{0x000508e39111a1c3, 0x000290120e912f7a, 0x0001cbf464acae43, 0x00015373e9576157, 0x0000edf493c85b60}}},
{{{0x00048158599b5a68, 0x0001fd75bc41d5d9, 0x0002d9fc1fa95d3c, 0x0007da27f20eba11, 0x000403b92e3019d4}},
{{0x0007c4d284764113, 0x0007fefebf06acec, 0x00039afb7a824100, 0x0001b48e47e7fd65, 0x00004c00c54d1dfa}},
{{0x00022f818b465cf8, 0x000342901dff09b8, 0x00031f595dc683cd, 0x00037a57745fd682, 0x000355bb12ab2617}}},
{{{0x000664cc7493bbf4, 0x00033d94761874e3, 0x0000179e1796f613, 0x0001890535e2867d, 0x0000f9b8132182ec}},
{{0x0001dac75a8c7318, 0x0003b679d5423460, 0x0006b8fcb7b6400e, 0x0006c73783be5f9d, 0x0007518eaf8e052a}},
{{0x000059c41b7f6c32, 0x00079e8706531491, 0x0006c747643cb582, 0x0002e20c0ad494e4, 0x00047c3871bbb175}}},
{{{0x0004539771ec4f48, 0x0007b9318badca28, 0x00070f19afe016c5, 0x0004ee7bb1608d23, 0x00000b89b8576469}},
{{0x00065d50c85066b0, 0x0006167453361f7c, 0x00006ba3818bb312, 0x0006aff29baa7522, 0x00008fea02ce8d48}},
{{0x0005dd7668deead0, 0x0004096d0ba47049, 0x0006275997219114, 0x00029bda8a67e6ae, 0x000473829a74f75d}}},
{{{0x0002da754679c418, 0x0003164c31be105a, 0x00011fac2b98ef5f, 0x00035a1aaf779256, 0x0002078684c4833c}},
{{0x0001533aad3902c9, 0x0001dde06b11e47b, 0x000784bed1930b77, 0x0001c80a92b9c867, 0x0006c668b4d44e4d}},
{{0x0000cf217a78820c, 0x00065024e7d2e769, 0x00023bb5efdda82a, 0x00019fd4b632d3c6, 0x0007411a6054f8a4}}},
{{{0x00059d32b99dc86d, 0x0006ac075e22a9ac, 0x00030b9220113371, 0x00027fd9a638966e, 0x0007c136574fb813}},
{{0x0002e53d18b175b4, 0x00033e7254204af3, 0x0003bcd7d5a1c4c5, 0x0004c7c22af65d0f, 0x0001ec9a872458c3}},
{{0x0006a4d400a2509b, 0x000041791056971c, 0x000655d5866e075c, 0x0002302bf3e64df8, 0x0003add88a5c7cd6}}},
{{{0x00015770b635dcf2, 0x00059ecd83f79571, 0x0002db461c0b7fbd, 0x00073a42a981345f, 0x000249929fccc879}},
{{0x000298d459393046, 0x00030bfecb3d90b8, 0x0003d9b8ea3df8d6, 0x0003900e96511579, 0x00061ba1131a406a}},
{{0x0000a0f116959029, 0x0005974fd7b1347a, 0x0001e0cc1c08edad, 0x000673bdf8ad1f13, 0x0005620310cbbd8e}}},
{{{0x000193434934d643, 0x0000d4a2445eaa51, 0x0007d0708ae76fe0, 0x00039847b6c3c7e1, 0x00037676a2a4d9d9}},
{{0x0006b5f477e285d6, 0x0004ed91ec326cc8, 0x0006d6537503a3fd, 0x000626d3763988d5, 0x0007ec846f3658ce}},
{{0x00068f3f1da22ec7, 0x0006ed8039a2736b, 0x0002627ee04c3c75, 0x0006ea90a647e7d1, 0x0006daaf723399b9}}},
{{{0x00027562eb3dbe47, 0x000291d7b4170be7, 0x0005d1ca67dfa8e1, 0x0002a88061f298a2, 0x0001304e9e71627d}},
{{0x000304bfacad8ea2, 0x000502917d108b07, 0x000043176ca6dd0f, 0x0005d5158f2c1d84, 0x0002b5449e58eb3b}},
{{0x000014d26adc9cfe, 0x0007f1691ba16f13, 0x0005e71828f06eac, 0x000349ed07f0fffc, 0x0004468de2d7c2dd}}},
{{{0x0003355e9419469e, 0x0001847bb8ea8a37, 0x0001fe6588cf9b71, 0x0006b1c9d2db6b22, 0x0006cce7c6ffb44b}},
{{0x0002d8c6f86307ce, 0x0006286ba1850973, 0x0005e9dcb08444d4, 0x0001a96a543362b2, 0x0005da6427e63247}},
{{0x0004c688deac22ca, 0x0006f775c3ff0352, 0x000565603ee419bb, 0x0006544456c61c46, 0x00058f29abfe79f2}}},
{{{0x0006cfab8de73e68, 0x0003e6efced4bd21, 0x0000056609500dbe, 0x00071b7824ad85df, 0x000577629c4a7f41}},
{{0x000264bf710ecdf6, 0x000708c58527896b, 0x00042ceae6c53394, 0x0004381b21e82b6a, 0x0006af93724185b4}},
{{0x0000024509c6a888, 0x0002696ab12e6644, 0x0000cca27f4b80d8, 0x0000c7c1f11b119e, 0x000701f25bb0caec}}},
{{{0x0000b0f8e4616ced, 0x0001d3c4b50fb875, 0x0002f29673dc0198, 0x0005f4b0f1830ffa, 0x0002e0c92bfbdc40}},
{{0x0000f6d97cbec113, 0x0004ce97fb7c93a3, 0x000139835a11281b, 0x000728907ada9156, 0x000720a5bc050955}},
{{0x000709439b805a35, 0x0006ec48557f8187, 0x00008a4d1ba13a2c, 0x000076348a0bf9ae, 0x0000e9b9cbb144ef}}},
{{{0x0002d48ffb5720ad, 0x00057b7f21a1df77, 0x0005550effba0645, 0x0005ec6a4098a931, 0x000221104eb3f337}},
{{0x00069bd55db1beee, 0x0006e14e47f731bd, 0x0001a35e47270eac, 0x00066f225478df8e, 0x000366d44191cfd3}},
{{0x00041743f2bc8c14, 0x000796b0ad8773c7, 0x00029fee5cbb689b, 0x000122665c178734, 0x0004167a4e6bc593}}},
{{{0x00039d2876f62700, 0x000001cecd1d6c87, 0x0007f01a11747675, 0x0002350da5a18190, 0x0007938bb7e22552}},
{{0x00062665f8ce8fee, 0x00029d101ac59857, 0x0004d93bbba59ffc, 0x00017b7897373f17, 0x00034b33370cb7ed}},
{{0x000591ee8681d6cc, 0x00039db0b4ea79b8, 0x000202220f380842, 0x0002f276ba42e0ac, 0x0001176fc6e2dfe6}}},
{{{0x00076cd05b9c619b, 0x00069654b0901695, 0x0007a53710b77f27, 0x00079a1ea7d28175, 0x00008fc3a4c677d5}},
{{0x0000e28949770eb8, 0x0005559e88147b72, 0x00035e1e6e63ef30, 0x00035b109aa7ff6f, 0x0001f6a3e54f2690}},
{{0x0004c199d30734ea, 0x0006c622cb9acc14, 0x0005660a55030216, 0x000068f1199f11fb, 0x0004f2fad0116b90}}},
{{{0x0006b24194ae4e54, 0x0002230afded8897, 0x00023412617d5071, 0x0003d5d30f35969b, 0x000445484a4972ef}},
{{0x0004d91db73bb638, 0x00055f82538112c5, 0x0006d85a279815de, 0x000740b7b0cd9cf9, 0x0003451995f2944e}},
{{0x0002fcd09fea7d7c, 0x000296126b9ed22a, 0x0004a171012a05b2, 0x0001db92c74d5523, 0x00010b89ca604289}}},
{{{0x000147499718289c, 0x0000a48a67e4c7ab, 0x00030fbc544bafe3, 0x0000c701315fe58a, 0x00020b878d577b75}},
{{0x000141be5a45f06e, 0x0005adb38becaea7, 0x0003fd46db41f2bb, 0x0006d488bbb5ce39, 0x00017d2d1d9ef0d4}},
{{0x0002af18073f3e6a, 0x00033aea420d24fe, 0x000298008bf4ff94, 0x0003539171db961e, 0x00072214f63cc65c}}},
{{{0x00037f405307a693, 0x0002e5e66cf2b69c, 0x0005d84266ae9c53, 0x0005e4eb7de853b9, 0x0005fdf48c58171c}},
{{0x0005b7b9f43b29c9, 0x000149ea31eea3b3, 0x0004be7713581609, 0x0002d87960395e98, 0x0001f24ac855a154}},
{{0x000608328e9505aa, 0x00022182841dc49a, 0x0003ec96891d2307, 0x0002f363fff22e03, 0x00000ba739e2ae39}}},
{{{0x000698de5c8790d6, 0x000268b8545beb25, 0x0006d2648b96fedf, 0x00047988ad1db07c, 0x00003283a3e67ad7}},
{{0x000426f5ea88bb26, 0x00033092e77f75c8, 0x0001a53940d819e7, 0x0001132e4f818613, 0x00072297de7d518d}},
{{0x00041dc7be0cb939, 0x0001b16c66100904, 0x0000a24c20cbc66d, 0x0004a2e9efe48681, 0x00005e1296846271}}},
{{{0x0002eeb32d9c495a, 0x00079e25772f9750, 0x0006d747833bbf23, 0x0006cdd816d5d749, 0x00039c00c9c13698}},
{{0x0007bbc8242c4550, 0x00059a06103b35b7, 0x0007237e4af32033, 0x000726421ab3537a, 0x00078cf25d38258c}},
{{0x00066b8e31489d68, 0x000573857e10e2b5, 0x00013be816aa1472, 0x00041964d3ad4bf8, 0x000006b52076b3ff}}},
{{{0x0000cfe19d95781c, 0x000312cc621c453c, 0x000145ace6da077c, 0x0000912bef9ce9b8, 0x0004d57e3443bc76}},
{{0x00037e16b9ce082d, 0x0001882f57853eb9, 0x0007d29eacd01fc5, 0x0002e76a59b5e715, 0x0007de2e9561a9f7}},
{{0x0000d4f4b6a55ecb, 0x0007ebb0bb733bce, 0x0007ba6a05200549, 0x0004f6ede4e22069, 0x0006b2a90af1a602}}},
{{{0x0003f4fc9ae61e97, 0x0003bc07ebfa2d24, 0x0003b744b55cd4a0, 0x00072553b25721f3, 0x0005fd8f4e9d12d3}},
{{0x0003f3245bb2d80a, 0x0000e5f720f36efd, 0x0003b9cccf60c06d, 0x000084e323f37926, 0x000465812c8276c2}},
{{0x0003beb22a1062d9, 0x0006a7063b82c9a8, 0x0000a5a35dc197ed, 0x0003c80c06a53def, 0x00005b32c2b1cb16}}},
{{{0x00005eccd24da8fd, 0x000580bbfdf07918, 0x0007e73586873c6a, 0x00074ceddf77f93e, 0x0003b5556a37b471}},
{{0x0004a42c7ad58195, 0x0005c8667e799eff, 0x00002e5e74c850a1, 0x0003f0db614e869a, 0x00031771a4856730}},
{{0x0000c524e14dd482, 0x000283457496c656, 0x0000ad6bcfb6cd45, 0x000375d1e8b02414, 0x0004fc079d27a733}}},
{{{0x000138b089bf2f7f, 0x0004a05bfd34ea39, 0x000203914c925ef5, 0x0007497fffe04e3c, 0x000124567cecaf98}},
{{0x00048b440c86c50d, 0x000139929cca3b86, 0x0000f8f2e44cdf2f, 0x00068432117ba6b2, 0x000241170c2bae3c}},
{{0x0001ab860ac473b4, 0x0005c0227c86a7ff, 0x00071b12bfc24477, 0x000006a573a83075, 0x0003f8612966c870}}},
{{{0x00020cc9782a0dde, 0x00065d4e3070aab3, 0x0007bc8e31547736, 0x00009ebfb1432d98, 0x000504aa77679736}},
{{0x0000fcfa36048d13, 0x00066e7133bbb383, 0x00064b42a8a45676, 0x0004ea6e4f9a85cf, 0x00026f57eee878a1}},
{{0x00032cd55687efb1, 0x0004448f5e2f6195, 0x000568919d460345, 0x000034c2e0ad1a27, 0x0004041943d9dba3}}},
{{{0x0000eeba43ebcc96, 0x000384dd5395f878, 0x0001df331a35d272, 0x000207ecfd4af70e, 0x0001420a1d976843}},
{{0x00017743a26caadd, 0x00048c9156f9c964, 0x0007ef278d1e9ad0, 0x00000ce58ea7bd01, 0x00012d931429800d}},
{{0x00067799d337594f, 0x00001647548f6018, 0x00057fce5578f145, 0x000009220c142a71, 0x0001b4f92314359a}}},
{{{0x0004109d89150951, 0x000225bd2d2d47cb, 0x00057cc080e73bea, 0x0006d71075721fcb, 0x000239b572a7f132}},
{{0x00073030a49866b1, 0x0002442be90b2679, 0x00077bd3d8947dcf, 0x0001fb55c1552028, 0x0005ff191d56f9a2}},
{{0x0006d433ac2d9068, 0x00072bf930a47033, 0x00064facf4a20ead, 0x000365f7a2b9402a, 0x000020c526a758f3}}},
{{{0x000034f89ed8dbbc, 0x00073b8f948d8ef3, 0x000786c1d323caab, 0x00043bd4a9266e51, 0x00002aacc4615313}},
{{0x0001ef59f042cc89, 0x0003b1c24976dd26, 0x00031d665cb16272, 0x00028656e470c557, 0x000452cfe0a5602c}},
{{0x0000f7a0647877df, 0x0004e1cc0f93f0d4, 0x0007ec4726ef1190, 0x0003bdd58bf512f8, 0x0004cfb7d7b304b8}}},
{{{0x00043d6cb89b75fe, 0x0003338d5b900e56, 0x00038d327d531a53, 0x0001b25c61d51b9f, 0x00014b4622b39075}},
{{0x000699c29789ef12, 0x00063beae321bc50, 0x000325c340adbb35, 0x000562e1a1e42bf6, 0x0005b1d4cbc434d3}},
{{0x00032615cc0a9f26, 0x00057711b99cb6df, 0x0005a69c14e93c38, 0x0006e88980a4c599, 0x0002f98f71258592}}},
{{{0x0004a74cb50f9e56, 0x000531d1c2640192, 0x0000c03d9d6c7fd2, 0x00057ccd156610c1, 0x0003a6ae249d806a}},
{{0x0002ae444f54a701, 0x000615397afbc5c2, 0x00060d7783f3f8fb, 0x0002aa675fc486ba, 0x0001d8062e9e7614}},
{{0x0002da85a9907c5a, 0x0006b23721ec4caf, 0x0004d2d3a4683aa2, 0x0007f9c6870efdef, 0x000298b8ce8aef25}}},
{{{0x00027953eff70cb2, 0x00054f22ae0ec552, 0x00029f3da92e2724, 0x000242ca0c22bd18, 0x00034b8a8404d5ce}},
{{0x000272ea0a2165de, 0x00068179ef3ed06f, 0x0004e2b9c0feac1e, 0x0003ee290b1b63bb, 0x0006ba6271803a7d}},
{{0x0006ecb583693335, 0x0003ec76bfdfb84d, 0x0002c895cf56a04f, 0x0006355149d54d52, 0x00071d62bdd465e1}}},
{{{0x0003cc28d378df80, 0x00072141f4968ca6, 0x000407696bdb6d0d, 0x0005d271b22ffcfb, 0x00074d5f317f3172}},
{{0x0005b5dab1f75ef5, 0x0001e2d60cbeb9a5, 0x000527c2175dfe57, 0x00059e8a2b8ff51f, 0x0001c333621262b2}},
{{0x0007e55467d9ca81, 0x0006a5653186f50d, 0x0006b188ece62df1, 0x0004c66d36844971, 0x0004aebcc4547e9d}}},
{{{0x0000071b276d01c9, 0x0000b0d8918e025e, 0x00075beea79ee2eb, 0x0003c92984094db8, 0x0005d88fbf95a3db}},
{{0x00008d9e7354b610, 0x00026b750b6dc168, 0x000162881e01acc9, 0x0007966df31d01a5, 0x000173bd9ddc9a1d}},
{{0x00000f1efe5872df, 0x0005da872318256a, 0x00059ceb81635960, 0x00018cf37693c764, 0x00006e1cd13b19ea}}},
{{{0x0000ad516f166f23, 0x000263f56d57c81a, 0x00013422384638ca, 0x0001331ff1af0a50, 0x0003080603526e16}},
{{0x0003af629e5b0353, 0x000204f1a088e8e5, 0x00010efc9ceea82e, 0x000589863c2fa34b, 0x0007f3a6a1a8d837}},
{{0x000644395d3d800b, 0x0002b9203dbedefc, 0x0004b18ce656a355, 0x00003f3466bc182c, 0x00030d0fded2e513}}},
{{{0x00014d1af21233b3, 0x0001de1989b39c0b, 0x00052669dc6f6f9e, 0x00043434b28c3fc7, 0x0000a9214202c099}},
{{0x0004971e68b84750, 0x00052ccc9779f396, 0x0003e904ae8255c8, 0x0004ecae46f39339, 0x0004615084351c58}},
{{0x000019c0aeb9a02e, 0x0001a2c06995d792, 0x000664cbb1571c44, 0x0006ff0736fa80b2, 0x0003bca0d2895ca5}}},
{{{0x000031bc3c5d62a4, 0x0007d9fe0f4c081e, 0x00043ed51467f22c, 0x0001e6cc0c1ed109, 0x0005631deddae8f1}},
{{0x00008eb69ecc01bf, 0x0005b4c8912df38d, 0x0005ea7f8bc2f20e, 0x000120e516caafaf, 0x0004ea8b4038df28}},
{{0x0005460af1cad202, 0x0000b4919dd0655d, 0x0007c4697d18c14c, 0x000231c890bba2a4, 0x00024ce0930542ca}}},
{{{0x000090f5fd06c106, 0x0006abb1021e43fd, 0x000232bcfad711a0, 0x0003a5c13c047f37, 0x00041d4e3c28a06d}},
{{0x0007a155fdf30b85, 0x0001c6c6e5d487f9, 0x00024be1134bdc5a, 0x0001405970326f32, 0x000549928a7324f4}},
{{0x000632a763ee1a2e, 0x0006fa4bffbd5e4d, 0x0005fd35a6ba4792, 0x0007b55e1de99de8, 0x000491b66dec0dcf}}},
{{{0x0005b13dc7ea32a7, 0x00018fc2db73131e, 0x0007e3651f8f57e3, 0x00025656055fa965, 0x00008f338d0c85ee}},
{{0x00004a8ed0da64a1, 0x0005ecfc45096ebe, 0x0005edee93b488b2, 0x0005b3c11a51bc8f, 0x0004cf6b8b0b7018}},
{{0x0003a821991a73bd, 0x00003be6418f5870, 0x0001ddc18eac9ef0, 0x00054ce09e998dc2, 0x000530d4a82eb078}}},
{{{0x00043630e1f94825, 0x0004d1956a6b4009, 0x000213fe2df8b5e0, 0x00005ce3a41191e6, 0x00065ea753f10177}},
{{0x000173456c9abf9e, 0x0007892015100dad, 0x00033ee14095fecb, 0x0006ad95d67a0964, 0x0000db3e7e00cbfb}},
{{0x0006fc3ee2096363, 0x0007ec36b96d67ac, 0x000510ec6a0758b1, 0x0000ed87df022109, 0x00002a4ec1921e1a}}},
{{{0x0006259a3b24b8a2, 0x000188b5f4170b9c, 0x000681c0dee15deb, 0x0004dfe665f37445, 0x0003d143c5112780}},
{{0x00006162f1cf795f, 0x000324ddcafe5eb9, 0x000018d5e0463218, 0x0007e78b9092428e, 0x00036d12b5dec067}},
{{0x0005279179154557, 0x00039f8f0741424d, 0x00045e6eb357923d, 0x00042c9b5edb746f, 0x0002ef517885ba82}}},
{{{0x0007974e8c58aedc, 0x0007757e083488c6, 0x000601c62ae7bc8b, 0x00045370c2ecab74, 0x0002f1b78fab143a}},
{{0x0006bffb305b2f51, 0x0005b112b2d712dd, 0x00035774974fe4e2, 0x00004af87a96e3a3, 0x00057968290bb3a0}},
{{0x0002b8430a20e101, 0x0001a49e1d88fee3, 0x00038bbb47ce4d96, 0x0001f0e7ba84d437, 0x0007dc43e35dc2aa}}},
{{{0x00066665887dd9c3, 0x000629760a6ab0b2, 0x000481e6c7243e6c, 0x000097e37046fc77, 0x0007ef72016758cc}},
{{0x00002a5c273e9718, 0x00032bc9dfb28b4f, 0x00048df4f8d5db1a, 0x00054c87976c028f, 0x000044fb81d82d50}},
{{0x000718c5a907e3d9, 0x0003b9c98c6b383b, 0x000006ed255eccdc, 0x0006976538229a59, 0x0007f79823f9c30d}}},
{{{0x0004d239a3b513e8, 0x00029723f51b1066, 0x000642f4cf04d9c3, 0x0004da095aa09b7a, 0x0000a4e0373d784d}},
{{0x00041ff068f587ba, 0x0001c00a191bcd53, 0x0007b56f9c209e25, 0x0003781e5fccaabe, 0x00064a9b0431c06d}},
{{0x0003d6a15b7d2919, 0x00041aa75046a5d6, 0x000691751ec2d3da, 0x00023638ab6721c4, 0x000071a7d0ace183}}},
{{{0x00072daac887ba0b, 0x0000b7f4ac5dda60, 0x0003bdda2c0498a4, 0x00074e67aa180160, 0x0002c3bcc7146ea7}},
{{0x0004355220e14431, 0x0000e1362a283981, 0x0002757cd8359654, 0x0002e9cd7ab10d90, 0x0007c69bcf761775}},
{{0x0000d7eb04e8295f, 0x0004a5ea1e6fa0fe, 0x00045e635c436c60, 0x00028ef4a8d4d18b, 0x0006f5a9a7322aca}}},
{{{0x0001000c2f41c6c5, 0x0000219fdf737174, 0x000314727f127de7, 0x0007e5277d23b81e, 0x000494e21a2e147a}},
{{0x0001d4eba3d944be, 0x0000100f15f3dce5, 0x00061a700e367825, 0x0005922292ab3d23, 0x00002ab9680ee8d3}},
{{0x00048a85dde50d9a, 0x0001c1f734493df4, 0x00047bdb64866889, 0x00059a7d048f8eec, 0x0006b5d76cbea46b}}},
{{{0x0007556cec0cd994, 0x0005eb9a03b7510a, 0x00050ad1dd91cb71, 0x0001aa5780b48a47, 0x0000ae333f685277}},
{{0x000141171e782522, 0x0006806d26da7c1f, 0x0003f31d1bc79ab9, 0x00009f20459f5168, 0x00016fb869c03dd3}},
{{0x0006199733b60962, 0x00069b157c266511, 0x00064740f893f1ca, 0x00003aa408fbf684, 0x0003f81e38b8f70d}}},
{{{0x00010fcc7ed9affe, 0x0004248cb0e96ff2, 0x0004311c115172e2, 0x0004c9d41cbf6925, 0x00050510fc104f50}},
{{0x00037f355f17c824, 0x00007ae85334815b, 0x0007e3abddd2e48f, 0x00061eeabe1f45e5, 0x0000ad3e2d34cded}},
{{0x00040fc5336e249d, 0x0003386639fb2de1, 0x0007bbf871d17b78, 0x00075f796b7e8004, 0x000127c158bf0fa1}}},
{{{0x00017c422e9879a2, 0x00028a5946c8fec3, 0x00053ab32e912b77, 0x0007b44da09fe0a5, 0x000354ef87d07ef4}},
{{0x00028fc4ae51b974, 0x00026e89bfd2dbd4, 0x0004e122a07665cf, 0x0007cab1203405c3, 0x0004ed82479d167d}},
{{0x0003b52260c5d975, 0x00079d6836171fdc, 0x0007d994f140d4bb, 0x0001b6c404561854, 0x000302d92d205392}}},
{{{0x0003c1a2bca4283d, 0x00023430c7bb2f02, 0x0001a3ea1bb58bc2, 0x0007265763de5c61, 0x00010e5d3b76f1ca}},
{{0x00046fb6e4e0f177, 0x00053497ad5265b7, 0x0001ebdba01386fc, 0x0000302f0cb36a3c, 0x0000edc5f5eb426d}},
{{0x0003bfd653da8e67, 0x000584953ec82a8a, 0x00055e288fa7707b, 0x0005395fc3931d81, 0x00045b46c51361cb}}},
{{{0x00002abf314f7fa1, 0x000391d19e8a1528, 0x0006a2fa13895fc7, 0x00009d8eddeaa591, 0x0002177bfa36dcb7}},
{{0x00054ddd8a7fe3e4, 0x0002cecc41c619d3, 0x00043a6562ac4d91, 0x0004efa5aca7bdd9, 0x0005c1c0aef32122}},
{{0x00001bbcfa79db8f, 0x0003d84beb3666e1, 0x00020c921d812204, 0x0002dd843d3b32ce, 0x0004ae619387d8ab}}},
{{{0x0003f6aa5344a32e, 0x00069683680f11bb, 0x00004c3581f623aa, 0x000701af5875cba5, 0x0001a00d91b17bf3}},
{{0x00017e44985bfb83, 0x00054e32c626cc22, 0x000096412ff38118, 0x0006b241d61a246a, 0x00075685abe5ba43}},
{{0x00060933eb61f2b2, 0x0005193fe92a4dd2, 0x0003d995a550f43e, 0x0003556fb93a883d, 0x000135529b623b0e}}},
{{{0x0000dbd7add1d518, 0x000119f823e2231e, 0x000451d66e5e7de2, 0x000500c39970f838, 0x00079b5b81a65ca3}},
{{0x000716bce22e83fe, 0x00033d0130b83eb8, 0x0000952abad0afac, 0x000309f64ed31b8a, 0x0005972ea051590a}},
{{0x0004ac20dc8f7811, 0x00029589a9f501fa, 0x0004d810d26a6b4a, 0x0005ede00d96b259, 0x0004f7e9c95905f3}}},
{{{0x00074bbc5781302e, 0x00073135bb81ec4c, 0x0007ef671b61483c, 0x0007264614ccd729, 0x00031993ad92e638}},
{{0x0000443d355299fe, 0x00039b7d7d5aee39, 0x000692519a2f34ec, 0x0006e4404924cf78, 0x0001942eec4a144a}},
{{0x00045319ae234992, 0x0002219d47d24fb5, 0x0004f04488b06cf6, 0x00053aaa9e724a12, 0x0002a0a65314ef9c}}},
{{{0x0007937ff7f927c2, 0x0000c2fa14c6a5b6, 0x000556bddb6dd07c, 0x0006f6acc179d108, 0x0004cf6e218647c2}},
{{0x00061acd3c1c793a, 0x00058b46b78779e6, 0x0003369aacbe7af2, 0x000509b0743074d4, 0x000055dc39b6dea1}},
{{0x0001227cc28d5bb6, 0x00078ee9bff57623, 0x00028cb2241f893a, 0x00025b541e3c6772, 0x000121a307710aa2}}},
{{{0x00035d5e9f034a97, 0x000126069785bc9b, 0x0005474ec7854ff0, 0x000296a302a348ca, 0x000333fc76c7a40e}},
{{0x0001713ec77483c9, 0x0006f70572d5facb, 0x00025ef34e22ff81, 0x00054d944f141188, 0x000527bb94a6ced3}},
{{0x0005992a995b482e, 0x00078dc707002ac7, 0x0005936394d01741, 0x0004fba4281aef17, 0x0006b89069b20a7a}}},
{{{0x0002a0416270220d, 0x00075f248b69d025, 0x0001cbbc16656a27, 0x0005b9ffd6e26728, 0x00023bc2103aa73e}},
{{0x0002fa8cb5c7db77, 0x000718e6982aa810, 0x00039e95f81a1a1b, 0x0005e794f3646cfb, 0x0000473d308a7639}},
{{0x0006792603589e05, 0x000248db9892595d, 0x000006a53cad2d08, 0x00020d0150f7ba73, 0x000102f73bfde043}}},
{{{0x0000b9ab7f5745c6, 0x0005caf0f8d21d63, 0x0007debea408ea2b, 0x00009edb93896d16, 0x00036597d25ea5c0}},
{{0x0004dae0b5511c9a, 0x0005257fffe0d456, 0x00054108d1eb2180, 0x000096cc0f9baefa, 0x0003f6bd725da4ea}},
{{0x00058d7b106058ac, 0x0003cdf8d20bee69, 0x00000a4cb765015e, 0x00036832337c7cc9, 0x0007b7ecc19da60d}}},
{{{0x0002373c695c690d, 0x0004c0c8520dcf18, 0x000384af4b7494b9, 0x0004ab4a8ea22225, 0x0004235ad7601743}},
{{0x00064a51a77cfa9b, 0x00029cf470ca0db5, 0x0004b60b6e0898d9, 0x00055d04ddffe6c7, 0x00003bedc661bf5c}},
{{0x0000cb0d078975f5, 0x000292313e530c4b, 0x00038dbb9124a509, 0x000350d0655a11f1, 0x0000e7ce2b0cdf06}}},
{{{0x0004643ac48c85a3, 0x0006878c2735b892, 0x0003a53523f4d877, 0x0003a504ed8bee9d, 0x000666e0a5d8fb46}},
{{0x0006fedfd94b70f9, 0x0002383f9745bfd4, 0x0004beae27c4c301, 0x00075aa4416a3f3f, 0x000615256138aece}},
{{0x0003f64e4870cb0d, 0x00061548b16d6557, 0x0007a261773596f3, 0x0007724d5f275d3a, 0x0007f0bc810d514d}}},
{{{0x00006ba426f4136f, 0x0003cafc0606b720, 0x000518f0a2359cda, 0x0005fae5e46feca7, 0x0000d1f8dbcf8eed}},
{{0x00049dad737213a0, 0x000745dee5d31075, 0x0007b1a55e7fdbe2, 0x0005ba988f176ea1, 0x0001d3a907ddec5a}},
{{0x000693313ed081dc, 0x0005b0a366901742, 0x00040c872ca4ca7e, 0x0006f18094009e01, 0x00000011b44a31bf}}},
{{{0x0007a06c3fc66c0c, 0x0001c9bac1ba47fb, 0x00023935c575038e, 0x0003f0bd71c59c13, 0x0003ac48d916e835}},
{{0x00061f696a0aa75c, 0x00038b0a57ad42ca, 0x0001e59ab706fdc9, 0x00001308d46ebfcd, 0x00063d988a2d2851}},
{{0x00020753afbd232e, 0x00071fbb1ed06002, 0x00039cae47a4af3a, 0x0000337c0b34d9c2, 0x00033fad52b2368a}}},
{{{0x000649c6c5e41e16, 0x00060667eee6aa80, 0x0004179d182be190, 0x000653d9567e6979, 0x00016c0f429a256d}},
{{0x0004c8d0c422cfe8, 0x000760b4275971a5, 0x0003da95bc1cad3d, 0x0000f151ff5b7376, 0x0003cc355ccb90a7}},
{{0x00069443903e9131, 0x00016f4ac6f9dd36, 0x0002ea4912e29253, 0x0002b4643e68d25d, 0x000631eaf426bae7}}},
{{{0x00010410da66fe9f, 0x00024d82dcb4d67d, 0x0003e6fe0e17752d, 0x0004dade1ecbb08f, 0x0005599648b1ea91}},
{{0x000175b9a3700de8, 0x00077c5f00aa48fb, 0x0003917785ca0317, 0x00005aa9b2c79399, 0x000431f2c7f665f8}},
{{0x00026344858f7b19, 0x0005f43d4a295ac0, 0x000242a75c52acd4, 0x0005934480220d10, 0x0007b04715f91253}}},
{{{0x0005bd28acf6ae43, 0x00016fab8f56907d, 0x0007acb11218d5f2, 0x00041fe02023b4db, 0x00059b37bf5c2f65}},
{{0x0006c280c4e6bac6, 0x0003ada3b361766e, 0x00042fe5125c3b4f, 0x000111d84d4aac22, 0x00048d0acfa57cde}},
{{0x000726e47dabe671, 0x0002ec45e746f6c1, 0x0006580e53c74686, 0x0005eda104673f74, 0x00016234191336d3}}},
{{{0x000499def6267ff6, 0x00076e858108773c, 0x000693cac5ddcb29, 0x00000311d00a9ff4, 0x0002cdfdfecd5d05}},
{{0x00019cd61ff38640, 0x000060c6c4b41ba9, 0x00075cf70ca7366f, 0x000118a8f16c011e, 0x0004a25707a203b9}},
{{0x0007668a53f6ed6a, 0x000303ba2e142556, 0x0003880584c10909, 0x0004fe20000a261d, 0x0005721896d248e4}}},
{{{0x00065517fd181bae, 0x0003e5772c76816d, 0x000019189640898a, 0x0001ed2a84de7499, 0x000578edd74f63c1}},
{{0x00055091a1d0da4e, 0x0004f6bfc7c1050b, 0x00064e4ecd2ea9be, 0x00007eb1f28bbe70, 0x00003c935afc4b03}},
{{0x000276c6492b0c3d, 0x00009bfc40bf932e, 0x000588e8f11f330b, 0x0003d16e694dc26e, 0x0003ec2ab590288c}}},
{{{0x0000d27be4d87bb9, 0x00056c27235db434, 0x00072e6e0ea62d37, 0x0005674cd06ee839, 0x0002dd5c25a200fc}},
{{0x00013a09ae32d1cb, 0x0003e81eb85ab4e4, 0x00007aaca43cae1f, 0x00062f05d7526374, 0x0000e1bf66c6adba}},
{{0x0003d5e9792c887e, 0x000319724dabbc55, 0x0002b97c78680800, 0x0007afdfdd34e6dd, 0x000730548b35ae88}}},
{{{0x000551a3cba8b8ee, 0x0003b6422be2d886, 0x000630e1419689bc, 0x0004653b07a7a955, 0x0003043443b411db}},
{{0x0003094ba1d6e334, 0x0006e126a7e3300b, 0x000089c0aefcfbc5, 0x0002eea11f836583, 0x000585a2277d8784}},
{{0x00025f8233d48962, 0x0006bd8f04aff431, 0x0004f907fd9a6312, 0x00040fd3c737d29b, 0x0007656278950ef9}}},
{{{0x0003cf59d51fc8c0, 0x0007a0a0d6de4718, 0x00055c3a3e6fb74b, 0x000353135f884fd5, 0x0003f4160a8c1b84}},
{{0x000073a3ea86cf9d, 0x0006e0e2abfb9c2e, 0x00060e2a38ea33ee, 0x00030b2429f3fe18, 0x00028bbf484b613f}},
{{0x00012f5c6f136c7c, 0x0000fedba237de4c, 0x000779bccebfab44, 0x0003aea93f4d6909, 0x0001e79cb358188f}}},
{{{0x000436c3eef7e3f1, 0x0007ffd3c21f0026, 0x0003e77bf20a2da9, 0x000418bffc8472de, 0x00065d7951b3a3b3}},
{{0x000153d8f5e08181, 0x00008533bbdb2efd, 0x0001149796129431, 0x00017a6e36168643, 0x000478ab52d39d1f}},
{{0x0006a4d39252d159, 0x000790e35900ecd4, 0x00030725bf977786, 0x00010a5c1635a053, 0x00016d87a411a212}}},
{{{0x00057e5a42066215, 0x0001a18b44983677, 0x0003e652de1e6f8f, 0x0006532be02ed8eb, 0x00028f87c8165f38}},
{{0x0004d5e2d54e0583, 0x0002e5d7b33f5f74, 0x0003a5de3f887ebf, 0x0006ef24bd6139b7, 0x0001f990b577a5a6}},
{{0x00044ead1be8f7d6, 0x0005759d4f31f466, 0x0000378149f47943, 0x00069f3be32b4f29, 0x00045882fe1534d6}}},
{{{0x0001345d757983d6, 0x000222f54234cccd, 0x0001784a3d8adbb4, 0x00036ebeee8c2bcc, 0x000688fe5b8f626f}},
{{0x00049929943c6fe4, 0x0004347072545b15, 0x0003226bced7e7c5, 0x00003a134ced89df, 0x0007dcf843ce405f}},
{{0x0000d6484a4732c0, 0x0007b94ac6532d92, 0x0005771b8754850f, 0x00048dd9df1461c8, 0x0006739687e73271}}},
{{{0x00002014385675a6, 0x0006155fb53d1def, 0x00037ea32e89927c, 0x000059a668f5a82e, 0x00046115aba1d4dc}},
{{0x0005cc9dc80c1ac0, 0x000683671486d4cd, 0x00076f5f1a5e8173, 0x0006d5d3f5f9df4a, 0x0007da0b8f68d7e7}},
{{0x00071953c3b5da76, 0x0006642233d37a81, 0x0002c9658076b1bd, 0x0005a581e63010ff, 0x0005a5f887e83674}}},
{{{0x000301cf70a13d11, 0x0002a6a1ba1891ec, 0x0002f291fb3f3ae0, 0x00021a7b814bea52, 0x0003669b656e44d1}},
{{0x000628d3a0a643b9, 0x00001cd8640c93d2, 0x0000b7b0cad70f2c, 0x0003864da98144be, 0x00043e37ae2d5d1c}},
{{0x00063f06eda6e133, 0x000233342758070f, 0x000098e0459cc075, 0x0004df5ead6c7c1b, 0x0006a21e6cd4fd5e}}},
{{{0x0006170a3046e65f, 0x0005401a46a49e38, 0x00020add5561c4a8, 0x0007abb4edde9e46, 0x000586bf9f1a195f}},
{{0x000129126699b2e3, 0x0000ee11a2603de8, 0x00060ac2f5c74c21, 0x00059b192a196808, 0x00045371b07001e8}},
{{0x0003088d5ef8790b, 0x00038c2126fcb4db, 0x000685bae149e3c3, 0x0000bcd601a4e930, 0x0000eafb03790e52}}},
{{{0x000555c13748042f, 0x0004d041754232c0, 0x000521b430866907, 0x0003308e40fb9c39, 0x000309acc675a02c}},
{{0x0000805e0f75ae1d, 0x000464cc59860a28, 0x000248e5b7b00bef, 0x0005d99675ef8f75, 0x00044ae3344c5435}},
{{0x000289b9bba543ee, 0x0003ab592e28539e, 0x00064d82abcdd83a, 0x0003c78ec172e327, 0x00062d5221b7f946}}},
{{{0x0004299c18d0936d, 0x0005914183418a49, 0x00052a18c721aed5, 0x0002b151ba82976d, 0x0005c0efde4bc754}},
{{0x0005d4263af77a3c, 0x00023fdd2289aeb0, 0x0007dc64f77eb9ec, 0x00001bd28338402c, 0x00014f29a5383922}},
{{0x00017edc25b2d7f5, 0x00037336a6081bee, 0x0007b5318887e5c3, 0x00049f6d491a5be1, 0x0005e72365c7bee0}}},
{{{0x0003fc074571217f, 0x0003a0d29b2b6aeb, 0x00006478ccdde59d, 0x00055e4d051bddfa, 0x00077f1104c47b4e}},
{{0x000339062f08b33e, 0x0004bbf3e657cfb2, 0x00067af7f56e5967, 0x0004dbd67f9ed68f, 0x00070b20555cb734}},
{{0x000113c555112c4c, 0x0007535103f9b7ca, 0x000140ed1d9a2108, 0x00002522333bc2af, 0x0000e34398f4a064}}},
{{{0x000522d93ecebde8, 0x000024f045e0f6cf, 0x00016db63426cfa1, 0x0001b93a1fd30fd8, 0x0005e5405368a362}},
{{0x00030b093e4b1928, 0x0001ce7e7ec80312, 0x0004e575bdf78f84, 0x00061f7a190bed39, 0x0006f8aded6ca379}},
{{0x0000123dfdb7b29a, 0x0004344356523c68, 0x00079a527921ee5f, 0x00074bfccb3e817e, 0x000780de72ec8d3d}}},
{{{0x00028545089ae7bc, 0x0001e38fe9a0c15c, 0x00012046e0e2377b, 0x0006721c560aa885, 0x0000eb28bf671928}},
{{0x0007eaf300f42772, 0x0005455188354ce3, 0x0004dcca4a3dcbac, 0x0003d314d0bfebcb, 0x0001defc6ad32b58}},
{{0x0003be1aef5195a7, 0x0006f22f62bdb5eb, 0x00039768b8523049, 0x00043394c8fbfdbd, 0x000467d201bf8dd2}}},
{{{0x0006919a74ef4fad, 0x00059ed4611452bf, 0x000691ec04ea09ef, 0x0003cbcb2700e984, 0x00071c43c4f5ba3c}},
{{0x0006f4bd567ae7a9, 0x00065ac89317b783, 0x00007d3b20fd8932, 0x000000f208326916, 0x0002ef9c5a5ba384}},
{{0x00056df6fa9e74cd, 0x00079c95e4cf56df, 0x0007be643bc609e2, 0x000149c12ad9e878, 0x0005a758ca390c5f}}},
{{{0x00072710d9462495, 0x00025aafaa007456, 0x0002d21f28eaa31b, 0x00017671ea005fd0, 0x0002dbae244b3eb7}},
{{0x0000918b1d61dc94, 0x0000d350260cd19c, 0x0007a2ab4e37b4d9, 0x00021fea735414d7, 0x0000a738027f639d}},
{{0x00074a2f57ffe1cc, 0x0001bc3073087301, 0x0007ec57f4019c34, 0x00034e082e1fa524, 0x0002698ca635126a}}},
{{{0x0005318832b0ba78, 0x0006f24b9ff17cec, 0x0000a47f30e060c7, 0x00058384540dc8d0, 0x0001fb43dcc49cae}},
{{0x0005702f5e3dd90e, 0x00031c9a4a70c5c7, 0x000136a5aa78fc24, 0x0001992f3b9f7b01, 0x0003c004b0c4afa3}},
{{0x000146ac06f4b82b, 0x0004b500d89e7355, 0x0003351e1c728a12, 0x00010b9f69932fe3, 0x0006b43fd01cd1fd}}},
{{{0x00075d4b4697c544, 0x00011be1fff7f8f4, 0x000119e16857f7e1, 0x00038a14345cf5d5, 0x0005a68d7105b52f}},
{{0x000742583e760ef3, 0x00073dc1573216b8, 0x0004ae48fdd7714a, 0x0004f85f8a13e103, 0x00073420b2d6ff0d}},
{{0x0004f6cb9e851e06, 0x000278c4471895e5, 0x0007efcdce3d64e4, 0x00064f6d455c4b4c, 0x0003db5632fea34b}}},
{{{0x0006ee2bf75dd9d8, 0x0006c72ceb34be8d, 0x000679c9cc345ec7, 0x0007898df96898a4, 0x00004321adf49d75}},
{{0x000190b1829825d5, 0x0000e7d3513225c9, 0x0001c12be3b7abae, 0x00058777781e9ca6, 0x00059197ea495df2}},
{{0x00016019e4e55aae, 0x00074fc5f25d209c, 0x0004566a939ded0d, 0x00066063e716e0b7, 0x00045eafdc1f4d70}}},
{{{0x000401858045d72b, 0x000459e5e0ca2d30, 0x000488b719308bea, 0x00056f4a0d1b32b5, 0x0005a5eebc80362d}},
{{0x00064624cfccb1ed, 0x000257ab8072b6c1, 0x0000120725676f0a, 0x0004a018d04e8eee, 0x0003f73ceea5d56d}},
{{0x0007bfd10a4e8dc6, 0x0007c899366736f4, 0x00055ebbeaf95c01, 0x00046db060903f8a, 0x0002605889126621}}},
{{{0x000704a68360ff04, 0x0003cecc3cde8b3e, 0x00021cd5470f64ff, 0x0006abc18d953989, 0x00054ad0c2e4e615}},
{{0x00018e3cc676e542, 0x00026079d995a990, 0x00004a7c217908b2, 0x0001dc7603e6655a, 0x0000dedfa10b2444}},
{{0x000367d5b82b522a, 0x0000d3f4b83d7dc7, 0x0003067f4cdbc58d, 0x00020452da697937, 0x00062ecb2baa77a9}}},
{{{0x0005795261152b3d, 0x0007a1dbbafa3cbd, 0x0005ad31c52588d5, 0x00045f3a4164685c, 0x0002e59f919a966d}},
{{0x00072836afb62874, 0x0000af3c2094b240, 0x0000c285297f357a, 0x0007cc2d5680d6e3, 0x00061913d5075663}},
{{0x00062d361a3231da, 0x00065284004e01b8, 0x000656533be91d60, 0x0006ae016c00a89f, 0x0003ddbc2a131c05}}},
{{{0x00040ff9ce5ec54b, 0x00057185e261b35b, 0x0003e254540e70a9, 0x0001b5814003e3f8, 0x00078968314ac04b}},
{{0x000257a22796bb14, 0x0006f360fb443e75, 0x000680e47220eaea, 0x0002fcf2a5f10c18, 0x0005ee7fb38d8320}},
{{0x0005fdcb41446a8e, 0x0005286926ff2a71, 0x0000f231e296b3f6, 0x000684a357c84693, 0x00061d0633c9bca0}}},
{{{0x00044935ffdb2566, 0x00012f016d176c6e, 0x0004fbb00f16f5ae, 0x0003fab78d99402a, 0x0006e965fd847aed}},
{{0x000328bcf8fc73df, 0x0003b4de06ff95b4, 0x00030aa427ba11a5, 0x0005ee31bfda6d9c, 0x0005b23ac2df8067}},
{{0x0002b953ee80527b, 0x00055f5bcdb1b35a, 0x00043a0b3fa23c66, 0x00076e07388b820a, 0x00079b9bbb9dd95d}}},
{{{0x000355406a3126c2, 0x00050d1918727d76, 0x0006e5ea0b498e0e, 0x0000a3b6063214f2, 0x0005065f158c9fd2}},
{{0x00017dae8e9f7374, 0x000719f76102da33, 0x0005117c2a80ca8b, 0x00041a66b65d0936, 0x0001ba811460accb}},
{{0x000169fb0c429954, 0x00059aedd9ecee10, 0x00039916eb851802, 0x00057917555cc538, 0x0003981f39e58a4f}}},
{{{0x00038a7559230a93, 0x00052c1cde8ba31f, 0x0002a4f2d4745a3d, 0x00007e9d42d4a28a, 0x00038dc083705acd}},
{{0x0005dfa56de66fde, 0x0000058809075908, 0x0006d3d8cb854a94, 0x0005b2f4e970b1e3, 0x00030f4452edcbc1}},
{{0x00052782c5759740, 0x00053f3397d990ad, 0x0003a939c7e84d15, 0x000234c4227e39e0, 0x000632d9a1a593f2}}},
{{{0x00036b15b807cba6, 0x0003f78a9e1afed7, 0x0000a59c2c608f1f, 0x00052bdd8ecb81b7, 0x0000b24f48847ed4}},
{{0x0001fd11ed0c84a7, 0x000021b3ed2757e1, 0x00073e1de58fc1c6, 0x0005d110c84616ab, 0x0003a5a7df28af64}},
{{0x0002d4be511beac7, 0x0006bda4d99e5b9b, 0x00017e6996914e01, 0x0007b1f0ce7fcf80, 0x00034fcf74475481}}},
{{{0x0007e04c789767ca, 0x0001671b28cfb832, 0x0007e57ea2e1c537, 0x0001fbaaef444141, 0x0003d3bdc164dfa6}},
{{0x00031dab78cfaa98, 0x0004e3216e5e54b7, 0x000249823973b689, 0x0002584984e48885, 0x0000119a3042fb37}},
{{0x0002d89ce8c2177d, 0x0006cd12ba182cf4, 0x00020a8ac19a7697, 0x000539fab2cc72d9, 0x00056c088f1ede20}}},
{{{0x00053d1110a86e17, 0x0006416eb65f466d, 0x00041ca6235fce20, 0x0005c3fc8a99bb12, 0x00009674c6b99108}},
{{0x00035fac24f38f02, 0x0007d75c6197ab03, 0x00033e4bc2a42fa7, 0x0001c7cd10b48145, 0x000038b7ea483590}},
{{0x0006f82199316ff8, 0x00005d54f1a9f3e9, 0x0003bcc5d0bd274a, 0x0005b284b8d2d5ad, 0x0006e5e31025969e}}},
{{{0x000462f587e593fb, 0x0003d94ba7ce362d, 0x000330f9b52667b7, 0x0005d45a48e0f00a, 0x00008f5114789a8d}},
{{0x0004fb0e63066222, 0x000130f59747e660, 0x000041868fecd41a, 0x0003105e8c923bc6, 0x0003058ad43d1838}},
{{0x00040ffde57663d0, 0x00071445d4c20647, 0x0002653e68170f7c, 0x00064cdee3c55ed6, 0x00026549fa4efe3d}}},
{{{0x00055a461e6bf9d6, 0x00078eeef4b02e83, 0x0001d34f648c16cf, 0x00007fea2aba5132, 0x0001926e1dc6401e}},
{{0x00068549af3f666e, 0x00009e2941d4bb68, 0x0002e8311f5dff3c, 0x0006429ef91ffbd2, 0x0003a10dfe132ce3}},
{{0x00074e8aea17cea0, 0x0000c743f83fbc0f, 0x0007cb03c4bf5455, 0x00068a8ba9917e98, 0x0001fa1d01d861e5}}},
{{{0x000055947d599832, 0x000346fe2aa41990, 0x0000164c8079195b, 0x000799ccfb7bba27, 0x000773563bc6a75c}},
{{0x0004ac00d1df94ab, 0x0003ba2101bd271b, 0x0007578988b9c4af, 0x0000f2bf89f49f7e, 0x00073fced18ee9a0}},
{{0x0001e90863139cb3, 0x0004f8b407d9a0d6, 0x00058e24ca924f69, 0x0007a246bbe76456, 0x0001f426b701b864}}},
{{{0x0001264c41911c01, 0x000702f44584bdf9, 0x00043c511fc68ede, 0x0000482c3aed35f9, 0x0004e1af5271d31b}},
{{0x000635c891a12552, 0x00026aebd38ede2f, 0x00066dc8faddae05, 0x00021c7d41a03786, 0x0000b76bb1b3fa7e}},
{{0x0000c1f97f92939b, 0x00017a88956dc117, 0x0006ee005ef99dc7, 0x0004aa9172b231cc, 0x0007b6dd61eb772a}}},
{{{0x0005c1e850f33d92, 0x0001ec119ab9f6f5, 0x0007f16f6de663e9, 0x0007a7d6cb16dec6, 0x000703e9bceaf1d2}},
{{0x0000abf9ab01d2c7, 0x0003880287630ae6, 0x00032eca045beddb, 0x00057f43365f32d0, 0x00053fa9b659bff6}},
{{0x0004c8e994885455, 0x0004ccb5da9cad82, 0x0003596bc610e975, 0x0007a80c0ddb9f5e, 0x000398d93e5c4c61}}},
{{{0x0003d16733e248f3, 0x0000e2b7e14be389, 0x00042c0ddaf6784a, 0x000589ea1fc67850, 0x00053b09b5ddf191}},
{{0x00077c60d2e7e3f2, 0x0004061051763870, 0x00067bc4e0ecd2aa, 0x0002bb941f1373b9, 0x000699c9c9002c30}},
{{0x0006a7235946f1cc, 0x0006b99cbb2fbe60, 0x0006d3a5d6485c62, 0x0004839466e923c0, 0x00051caf30c6fcdd}}},
{{{0x0003a7427674e00a, 0x0006142f4f7e74c1, 0x0004cc93318c3a15, 0x0006d51bac2b1ee7, 0x0005504aa292383f}},
{{0x0002f99a18ac54c7, 0x000398a39661ee6f, 0x000384331e40cde3, 0x0004cd15c4de19a6, 0x00012ae29c189f8e}},
{{0x0006c0cb1f0d01cf, 0x000187469ef5d533, 0x00027138883747bf, 0x0002f52ae53a90e8, 0x0005fd14fe958eba}}},
{{{0x00042ddf2845ab2c, 0x0006214ffd3276bb, 0x00000b8d181a5246, 0x000268a6d579eb20, 0x000093ff26e58647}},
{{0x0002fe5ebf93cb8e, 0x000226da8acbe788, 0x00010883a2fb7ea1, 0x000094707842cf44, 0x0007dd73f960725d}},
{{0x000524fe68059829, 0x00065b75e47cb621, 0x00015eb0a5d5cc19, 0x00005209b3929d5a, 0x0002f59bcbc86b47}}},
{{{0x00047d429917135f, 0x0003eacfa07af070, 0x0001deab46b46e44, 0x0007a53f3ba46cdf, 0x0005458b42e2e51a}},
{{0x0001d560b691c301, 0x0007f5bafce3ce08, 0x0004cd561614806c, 0x0004588b6170b188, 0x0002aa55e3d01082}},
{{0x000192e60c07444f, 0x0005ae8843a21daa, 0x0006d721910b1538, 0x0003321a95a6417e, 0x00013e9004a8a768}}},
{{{0x00058845832fcedb, 0x000135cd7f0c6e73, 0x00053ffbdfe8e35b, 0x00022f195e06e55b, 0x00073937e8814bce}},
{{0x000600c9193b877f, 0x00021c1b8a0d7765, 0x000379927fb38ea2, 0x00070d7679dbe01b, 0x0005f46040898de9}},
{{0x00037116297bf48d, 0x00045a9e0d069720, 0x00025af71aa744ec, 0x00041af0cb8aaba3, 0x0002cf8a4e891d5e}}},
{{{0x0003fd8707110f67, 0x00026f8716a92db2, 0x0001cdaa1b753027, 0x000504be58b52661, 0x0002049bd6e58252}},
{{0x0005487e17d06ba2, 0x0003872a032d6596, 0x00065e28c09348e0, 0x00027b6bb2ce40c2, 0x0007a6f7f2891d6a}},
{{0x0001fd8d6a9aef49, 0x0007cb67b7216fa1, 0x00067aff53c3b982, 0x00020ea610da9628, 0x0006011aadfc5459}}},
{{{0x0007926dcf95f83c, 0x00042e25120e2bec, 0x00063de96df1fa15, 0x0004f06b50f3f9cc, 0x0006fc5cc1b0b62f}},
{{0x0006d0c802cbf890, 0x000141bfed554c7b, 0x0006dbb667ef4263, 0x00058f3126857edc, 0x00069ce18b779340}},
{{0x00075528b29879cb, 0x00079a8fd2125a3d, 0x00027c8d4b746ab8, 0x0000f8893f02210c, 0x00015596b3ae5710}}},
{{{0x000739d23f9179a2, 0x000632fadbb9e8c4, 0x0007c8522bfe0c48, 0x0006ed0983ef5aa9, 0x0000d2237687b5f4}},
{{0x000731167e5124ca, 0x00017b38e8bbe13f, 0x0003d55b942f9056, 0x00009c1495be913f, 0x0003aa4e241afb6d}},
{{0x000138bf2a3305f5, 0x0001f45d24d86598, 0x0005274bad2160fe, 0x0001b6041d58d12a, 0x00032fcaa6e4687a}}},
{{{0x00056e8dc57d9af5, 0x0005b3be17be4f78, 0x0003bf928cf82f4b, 0x00052e55600a6f11, 0x0004627e9cefebd6}},
{{0x0007a4732787ccdf, 0x00011e427c7f0640, 0x00003659385f8c64, 0x0005f4ead9766bfb, 0x000746f6336c2600}},
{{0x0002f345ab6c971c, 0x000653286e63e7e9, 0x00051061b78a23ad, 0x00014999acb54501, 0x0007b4917007ed66}}},
{{{0x0005fb5cab84b064, 0x0002513e778285b0, 0x000457383125e043, 0x0006bda3b56e223d, 0x000122ba376f844f}},
{{0x00041b28dd53a2dd, 0x00037be85f87ea86, 0x00074be3d2a85e41, 0x0001be87fac96ca6, 0x0001d03620fe08cd}},
{{0x000232cda2b4e554, 0x0000422ba30ff840, 0x000751e7667b43f5, 0x0006261755da5f3e, 0x00002c70bf52b68e}}},
{{{0x0007ec4b5d0b2fbb, 0x000200e910595450, 0x000742057105715e, 0x0002f07022530f60, 0x00026334f0a409ef}},
{{0x000532bf458d72e1, 0x00040f96e796b59c, 0x00022ef79d6f9da3, 0x000501ab67beca77, 0x0006b0697e3feb43}},
{{0x0000f04adf62a3c0, 0x0005e0edb48bb6d9, 0x0007c34aa4fbc003, 0x0007d74e4e5cac24, 0x0001cc37f43441b2}}},
{{{0x0007565a5cc7324f, 0x00001ca0d5244a11, 0x000116b067418713, 0x0000a57d8c55edae, 0x0006c6809c103803}},
{{0x000656f1c9ceaeb9, 0x0007031cacad5aec, 0x0001308cd0716c57, 0x00041c1373941942, 0x0003a346f772f196}},
{{0x00055112e2da6ac8, 0x0006363d0a3dba5a, 0x000319c98ba6f40c, 0x0002e84b03a36ec7, 0x00005911b9f6ef7c}}},
{{{0x00039983f5df0ebb, 0x0001ea2589959826, 0x0006ce638703cdd6, 0x0006311678898505, 0x0006b3cecf9aa270}},
{{0x0001acf3512eeaef, 0x0002639839692a69, 0x000669a234830507, 0x00068b920c0603d4, 0x000555ef9d1c64b2}},
{{0x000770ba3b73bd08, 0x00011475f7e186d4, 0x0000251bc9892bbc, 0x00024eab9bffcc5a, 0x000675f4de133817}}},
{{{0x000452036b1782fc, 0x00002d95b07681c5, 0x0005901cf99205b2, 0x000290686e5eecb4, 0x00013d99df70164c}},
{{0x0007f6d93bdab31d, 0x0001f3aca5bfd425, 0x0002fa521c1c9760, 0x00062180ce27f9cd, 0x00060f450b882cd3}},
{{0x00035ec321e5c0ca, 0x00013ae337f44029, 0x0004008e813f2da7, 0x000640272f8e0c3a, 0x0001c06de9e55eda}}},
{{{0x00077ad6a33ec4e2, 0x000717c5dc11d321, 0x0004a114559823e4, 0x000306ce50a1e2b1, 0x0004cf38a1fec2db}},
{{0x00052b40ff6d69aa, 0x00031b8809377ffa, 0x000536625cd14c2c, 0x000516af252e17d1, 0x00078096f8e7d32b}},
{{0x0002aa650dfa5ce7, 0x00054916a8f19415, 0x00000dc96fe71278, 0x00055f2784e63eb8, 0x000373cad3a26091}}},
{{{0x0004634d82c9f57c, 0x0004249268a6d652, 0x0006336d687f2ff7, 0x0004fe4f4e26d9a0, 0x0000040f3d945441}},
{{0x0006a8fb89ddbbad, 0x00078c35d5d97e37, 0x00066e3674ef2cb2, 0x00034347ac53dd8f, 0x00021547eda5112a}},
{{0x0005e939fd5986d3, 0x00012a2147019bdf, 0x0004c466e7d09cb2, 0x0006fa5b95d203dd, 0x00063550a334a254}}},
{{{0x0007d6edb569cf37, 0x00060194a5dc2ca0, 0x0005af59745e10a6, 0x0007a8f53e004875, 0x0003eea62c7daf78}},
{{0x0002584572547b49, 0x00075c58811c1377, 0x0004d3c637cc171b, 0x00033d30747d34e3, 0x00039a92bafaa7d7}},
{{0x0004c713e693274e, 0x0006ed1b7a6eb3a4, 0x00062ace697d8e15, 0x000266b8292ab075, 0x00068436a0665c9c}}},
{{{0x000235e8202f3f27, 0x00044c9f2eb61780, 0x000630905b1d7003, 0x0004fcc8d274ead1, 0x00017b6e7f68ab78}},
{{0x0006d317e820107c, 0x000090815d2ca3ca, 0x00003ff1eb1499a1, 0x00023960f050e319, 0x0005373669c91611}},
{{0x000014ab9a0e5257, 0x00009939567f8ba5, 0x0004b47b2a423c82, 0x000688d7e57ac42d, 0x0001cb4b5a678f87}}},
{{{0x0004c06b394afc6c, 0x0004931b4bf636cc, 0x00072b60d0322378, 0x00025127c6818b25, 0x000330bca78de743}},
{{0x0004aa62a2a007e7, 0x00061e0e38f62d6e, 0x00002f888fcc4782, 0x0007562b83f21c00, 0x0002dc0fd2d82ef6}},
{{0x0006ff841119744e, 0x0002c560e8e49305, 0x0007254fefe5a57a, 0x00067ae2c560a7df, 0x0003c31be1b369f1}}},
{{{0x0004864d08948aee, 0x0005d237438df61e, 0x0002b285601f7067, 0x00025dbcbae6d753, 0x000330b61134262d}},
{{0x0000bc93f9cb4272, 0x0003f8f9db73182d, 0x0002b235eabae1c4, 0x0002ddbf8729551a, 0x00041cec1097e7d5}},
{{0x000619d7a26d808a, 0x0003c3b3c2adbef2, 0x0006877c9eec7f52, 0x0003beb9ebe1b66d, 0x00026b44cd91f287}}},
{{{0x000048478f387475, 0x00069397d9678a3e, 0x00067c8156c976f3, 0x0002eb4d5589226c, 0x0002c709e6c1c10a}},
{{0x0007f29362730383, 0x0007fd7951459c36, 0x0007504c512d49e7, 0x000087ed7e3bc55f, 0x0007deb10149c726}},
{{0x0002af6a8766ee7a, 0x00008aaa79a1d96c, 0x00042f92d59b2fb0, 0x0001752c40009c07, 0x00008e68e9ff62ce}}},
{{{0x0005500a4bc130ad, 0x000127a17a938695, 0x00002a26fa34e36d, 0x000584d12e1ecc28, 0x0002f1f3f87eeba3}},
{{0x000509d50ab8f2f9, 0x0001b8ab247be5e5, 0x0005d9b2e6b2e486, 0x0004faa5479a1339, 0x0004cb13bd738f71}},
{{0x00048c75e515b64a, 0x00075b6952071ef0, 0x0005d46d42965406, 0x0007746106989f9f, 0x00019a1e353c0ae2}}},
{{{0x00047560bafa05c3, 0x000418dcabcc2fa3, 0x00035991cecf8682, 0x00024371a94b8c60, 0x00041546b11c20c3}},
{{0x000172cdd596bdbd, 0x0000731ddf881684, 0x00010426d64f8115, 0x00071a4fd8a9a3da, 0x000736bd3990266a}},
{{0x00032d509334b3b4, 0x00016c102cae70aa, 0x0001720dd51bf445, 0x0005ae662faf9821, 0x000412295a2b87fa}}},
{{{0x00019b88f57ed6e9, 0x0004cdbf1904a339, 0x00042b49cd4e4f2c, 0x00071a2e771909d9, 0x00014e153ebb52d2}},
{{0x00055261e293eac6, 0x00006426759b65cc, 0x00040265ae116a48, 0x0006c02304bae5bc, 0x0000760bb8d195ad}},
{{0x00061a17cde6818a, 0x00053dad34108827, 0x00032b32c55c55b6, 0x0002f9165f9347a3, 0x0006b34be9bc33ac}}},
{{{0x00072f643a78c0b2, 0x0003de45c04f9e7b, 0x000706d68d30fa5c, 0x000696f63e8e2f24, 0x0002012c18f0922d}},
{{0x000469656571f2d3, 0x0000aa61ce6f423f, 0x0003f940d71b27a1, 0x000185f19d73d16a, 0x00001b9c7b62e6dd}},
{{0x000355e55ac89d29, 0x0003e8b414ec7101, 0x00039db07c520c90, 0x0006f41e9b77efe1, 0x00008af5b784e4ba}}},
{{{0x000499dc881f2533, 0x00034ef26476c506, 0x0004d107d2741497, 0x000346c4bd6efdb3, 0x00032b79d71163a1}},
{{0x000314d289cc2c4b, 0x00023450e2f1bc4e, 0x0000cd93392f92f4, 0x0001370c6a946b7d, 0x0006423c1d5afd98}},
{{0x0005f8d9edfcb36a, 0x0001e6e8dcbf3990, 0x0007974f348af30a, 0x0006e6724ef19c7c, 0x000480a5efbc13e2}}},
{{{0x0001e70b01622071, 0x0001f163b5f8a16a, 0x00056aaf341ad417, 0x0007989635d830f7, 0x00047aa27600cb7b}},
{{0x00014ce442ce221f, 0x00018980a72516cc, 0x000072f80db86677, 0x000703331fda526e, 0x00024b31d47691c8}},
{{0x00041eedc015f8c3, 0x0007cf8d27ef854a, 0x000289e3584693f9, 0x00004a7857b309a7, 0x000545b585d14dda}}},
{{{0x0007275ea0d43a0f, 0x000681137dd7ccf7, 0x0001e79cbab79a38, 0x00022a214489a66a, 0x0000f62f9c332ba5}},
{{0x0004e4d0e3b321e1, 0x0007451fe3d2ac40, 0x000666f678eea98d, 0x000038858667fead, 0x0004d22dc3e64c8d}},
{{0x00046589d63b5f39, 0x0007eaf979ec3f96, 0x0004ebe81572b9a8, 0x00021b7f5d61694a, 0x0001c0fa01a36371}}},
{{{0x000604b622943dff, 0x0001c899f6741a58, 0x00060219e2f232fb, 0x00035fae92a7f9cb, 0x0000fa3614f3b1ca}},
{{0x00002b0e8c936a50, 0x0006b83b58b6cd21, 0x00037ed8d3e72680, 0x0000a037db9f2a62, 0x0004005419b1d2bc}},
{{0x0003febdb9be82f0, 0x0005e74895921400, 0x000553ea38822706, 0x0005a17c24cfc88c, 0x0001fba218aef40a}}},
{{{0x00049448fac8f53e, 0x00034f74c6e8356a, 0x0000ad780607dba2, 0x0007213a7eb63eb6, 0x000392e3acaa8c86}},
{{0x000657043e7b0194, 0x0005c11b55efe9e7, 0x0007737bc6a074fb, 0x0000eae41ce355cc, 0x0006c535d13ff776}},
{{0x000534e93e8a35af, 0x00008b10fd02c997, 0x00026ac2acb81e05, 0x00009d8c98ce3b79, 0x00025e17fe4d50ac}}},
{{{0x00009bd71e04f676, 0x00025ac841f2a145, 0x0001a47eac823871, 0x0001a8a8c36c581a, 0x000255751442a9fb}},
{{0x00077ff576f121a7, 0x0004e5f9b0fc722b, 0x00046f949b0d28c8, 0x0004cde65d17ef26, 0x0006bba828f89698}},
{{0x0001bc6690fe3901, 0x000314132f5abc5a, 0x000611835132d528, 0x0005f24b8eb48a57, 0x000559d504f7f6b7}}},
{{{0x00038378b3eb54d5, 0x0004d4aaa78f94ee, 0x0004a002e875a74d, 0x00010b851367b17c, 0x00001ab12d5807e3}},
{{0x000091e7f6d266fd, 0x00036060ef037389, 0x00018788ec1d1286, 0x000287441c478eb0, 0x000123ea6a3354bd}},
{{0x0005189041e32d96, 0x00005b062b090231, 0x0000c91766e7b78f, 0x0000aa0f55a138ec, 0x0004a3961e2c918a}}},
{{{0x00043be0f8e6bba0, 0x00068fdffc614e3b, 0x0004e91dab5b3be0, 0x0003b1d4c9212ff0, 0x0002cd6bce3fb1db}},
{{0x0007d644f3233f1e, 0x0001c69f9e02c064, 0x00036ae5e5266898, 0x00008fc1dad38b79, 0x00068aceead9bd41}},
{{0x0004c90ef3d7c210, 0x000496f5a0818716, 0x00079cf88cc239b8, 0x0002cb9c306cf8db, 0x000595760d5b508f}}},
{{{0x0001bfe104aa6397, 0x00011494ff996c25, 0x00064251623e5800, 0x0000d49fc5e044be, 0x000709fa43edcb29}},
{{0x0002cbebfd022790, 0x0000b8822aec1105, 0x0004d1cfd226bccc, 0x000515b2fa4971be, 0x0002cb2c5df54515}},
{{0x00025d8c63fd2aca, 0x0004c5cd29dffd61, 0x00032ec0eb48af05, 0x00018f9391f9b77c, 0x00070f029ecf0c81}}},
{{{0x000307b32eed3e33, 0x0006748ab03ce8c2, 0x00057c0d9ab810bc, 0x00042c64a224e98c, 0x0000b7d5d8a6c314}},
{{0x0002afaa5e10b0b9, 0x00061de08355254d, 0x0000eb587de3c28d, 0x0004f0bb9f7dbbd5, 0x00044eca5a2a74bd}},
{{0x000448327b95d543, 0x0000146681e3a4ba, 0x00038714adc34e0c, 0x0004f26f0e298e30, 0x000272224512c7de}}},
{{{0x000492af49c5342e, 0x0002365cdf5a0357, 0x00032138a7ffbb60, 0x0002a1f7d14646fe, 0x00011b5df18a44cc}},
{{0x0003bb8a42a975fc, 0x0006f2d5b46b17ef, 0x0007b6a9223170e5, 0x000053713fe3b7e6, 0x00019735fd7f6bc2}},
{{0x000390d042c84266, 0x0001efe32a8fdc75, 0x0006925ee7ae1238, 0x0004af9281d0e832, 0x0000fef911191df8}}}

View file

@ -0,0 +1,96 @@
{{{0x00003905d740913e, 0x0000ba2817d673a2, 0x00023e2827f4e67c, 0x000133d2e0c21a34, 0x00044fd2f9298f81}},
{{0x000493c6f58c3b85, 0x0000df7181c325f7, 0x0000f50b0b3e4cb7, 0x0005329385a44c32, 0x00007cf9d3a33d4b}},
{{0x00011205877aaa68, 0x000479955893d579, 0x00050d66309b67a0, 0x0002d42d0dbee5ee, 0x0006f117b689f0c6}}},
{{{0x00011fe8a4fcd265, 0x0007bcb8374faacc, 0x00052f5af4ef4d4f, 0x0005314098f98d10, 0x0002ab91587555bd}},
{{0x0005b0a84cee9730, 0x00061d10c97155e4, 0x0004059cc8096a10, 0x00047a608da8014f, 0x0007a164e1b9a80f}},
{{0x0006933f0dd0d889, 0x00044386bb4c4295, 0x0003cb6d3162508c, 0x00026368b872a2c6, 0x0005a2826af12b9b}}},
{{{0x000182c3a447d6ba, 0x00022964e536eff2, 0x000192821f540053, 0x0002f9f19e788e5c, 0x000154a7e73eb1b5}},
{{0x0002bc4408a5bb33, 0x000078ebdda05442, 0x0002ffb112354123, 0x000375ee8df5862d, 0x0002945ccf146e20}},
{{0x0003dbf1812a8285, 0x0000fa17ba3f9797, 0x0006f69cb49c3820, 0x00034d5a0db3858d, 0x00043aabe696b3bb}}},
{{{0x00072c9aaa3221b1, 0x000267774474f74d, 0x000064b0e9b28085, 0x0003f04ef53b27c9, 0x0001d6edd5d2e531}},
{{0x00025cd0944ea3bf, 0x00075673b81a4d63, 0x000150b925d1c0d4, 0x00013f38d9294114, 0x000461bea69283c9}},
{{0x00036dc801b8b3a2, 0x0000e0a7d4935e30, 0x0001deb7cecc0d7d, 0x000053a94e20dd2c, 0x0007a9fbb1c6a0f9}}},
{{{0x0006217e039d8064, 0x0006dea408337e6d, 0x00057ac112628206, 0x000647cb65e30473, 0x00049c05a51fadc9}},
{{0x0006678aa6a8632f, 0x0005ea3788d8b365, 0x00021bd6d6994279, 0x0007ace75919e4e3, 0x00034b9ed338add7}},
{{0x0004e8bf9045af1b, 0x000514e33a45e0d6, 0x0007533c5b8bfe0f, 0x000583557b7e14c9, 0x00073c172021b008}}},
{{{0x00075b0249864348, 0x00052ee11070262b, 0x000237ae54fb5acd, 0x0003bfd1d03aaab5, 0x00018ab598029d5c}},
{{0x000700848a802ade, 0x0001e04605c4e5f7, 0x0005c0d01b9767fb, 0x0007d7889f42388b, 0x0004275aae2546d8}},
{{0x00032cc5fd6089e9, 0x000426505c949b05, 0x00046a18880c7ad2, 0x0004a4221888ccda, 0x0003dc65522b53df}}},
{{{0x0007013b327fbf93, 0x0001336eeded6a0d, 0x0002b565a2bbf3af, 0x000253ce89591955, 0x0000267882d17602}},
{{0x0000c222a2007f6d, 0x000356b79bdb77ee, 0x00041ee81efe12ce, 0x000120a9bd07097d, 0x000234fd7eec346f}},
{{0x0000a119732ea378, 0x00063bf1ba8e2a6c, 0x00069f94cc90df9a, 0x000431d1779bfc48, 0x000497ba6fdaa097}}},
{{{0x0003cd86468ccf0b, 0x00048553221ac081, 0x0006c9464b4e0a6e, 0x00075fba84180403, 0x00043b5cd4218d05}},
{{0x0006cc0313cfeaa0, 0x0001a313848da499, 0x0007cb534219230a, 0x00039596dedefd60, 0x00061e22917f12de}},
{{0x0002762f9bd0b516, 0x0001c6e7fbddcbb3, 0x00075909c3ace2bd, 0x00042101972d3ec9, 0x000511d61210ae4d}}},
{{{0x000386484420de87, 0x0002d6b25db68102, 0x000650b4962873c0, 0x0004081cfd271394, 0x00071a7fe6fe2482}},
{{0x000676ef950e9d81, 0x0001b81ae089f258, 0x00063c4922951883, 0x0002f1d54d9b3237, 0x0006d325924ddb85}},
{{0x000182b8a5c8c854, 0x00073fcbe5406d8e, 0x0005de3430cff451, 0x000554b967ac8c41, 0x0004746c4b6559ee}}},
{{{0x000546c864741147, 0x0003a1df99092690, 0x0001ca8cc9f4d6bb, 0x00036b7fc9cd3b03, 0x000219663497db5e}},
{{0x00077b3c6dc69a2b, 0x0004edf13ec2fa6e, 0x0004e85ad77beac8, 0x0007dba2b28e7bda, 0x0005c9a51de34fe9}},
{{0x0000f1cf79f10e67, 0x00043ccb0a2b7ea2, 0x00005089dfff776a, 0x0001dd84e1d38b88, 0x0004804503c60822}}},
{{{0x000021d23a36d175, 0x0004fd3373c6476d, 0x00020e291eeed02a, 0x00062f2ecf2e7210, 0x000771e098858de4}},
{{0x00049ed02ca37fc7, 0x000474c2b5957884, 0x0005b8388e816683, 0x0004b6c454b76be4, 0x000553398a516506}},
{{0x0002f5d278451edf, 0x000730b133997342, 0x0006965420eb6975, 0x000308a3bfa516cf, 0x0005a5ed1d68ff5a}}},
{{{0x0005e0c558527359, 0x0003395b73afd75c, 0x000072afa4e4b970, 0x00062214329e0f6d, 0x000019b60135fefd}},
{{0x0005122afe150e83, 0x0004afc966bb0232, 0x0001c478833c8268, 0x00017839c3fc148f, 0x00044acb897d8bf9}},
{{0x000068145e134b83, 0x0001e4860982c3cc, 0x000068fb5f13d799, 0x0007c9283744547e, 0x000150c49fde6ad2}}},
{{{0x0001863c9cdca868, 0x0003770e295a1709, 0x0000d85a3720fd13, 0x0005e0ff1f71ab06, 0x00078a6d7791e05f}},
{{0x0003f29509471138, 0x000729eeb4ca31cf, 0x00069c22b575bfbc, 0x0004910857bce212, 0x0006b2b5a075bb99}},
{{0x0007704b47a0b976, 0x0002ae82e91aab17, 0x00050bd6429806cd, 0x00068055158fd8ea, 0x000725c7ffc4ad55}}},
{{{0x00002bf71cd098c0, 0x00049dabcc6cd230, 0x00040a6533f905b2, 0x000573efac2eb8a4, 0x0004cd54625f855f}},
{{0x00026715d1cf99b2, 0x0002205441a69c88, 0x000448427dcd4b54, 0x0001d191e88abdc5, 0x000794cc9277cb1f}},
{{0x0006c426c2ac5053, 0x0005a65ece4b095e, 0x0000c44086f26bb6, 0x0007429568197885, 0x0007008357b6fcc8}}},
{{{0x00039fbb82584a34, 0x00047a568f257a03, 0x00014d88091ead91, 0x0002145b18b1ce24, 0x00013a92a3669d6d}},
{{0x0000672738773f01, 0x000752bf799f6171, 0x0006b4a6dae33323, 0x0007b54696ead1dc, 0x00006ef7e9851ad0}},
{{0x0003771cc0577de5, 0x0003ca06bb8b9952, 0x00000b81c5d50390, 0x00043512340780ec, 0x0003c296ddf8a2af}}},
{{{0x00034d2ebb1f2541, 0x0000e815b723ff9d, 0x000286b416e25443, 0x0000bdfe38d1bee8, 0x0000a892c7007477}},
{{0x000515f9d914a713, 0x00073191ff2255d5, 0x00054f5cc2a4bdef, 0x0003dd57fc118bcf, 0x0007a99d393490c7}},
{{0x0002ed2436bda3e8, 0x00002afd00f291ea, 0x0000be7381dea321, 0x0003e952d4b2b193, 0x000286762d28302f}}},
{{{0x00058e2bce2ef5bd, 0x00068ce8f78c6f8a, 0x0006ee26e39261b2, 0x00033d0aa50bcf9d, 0x0007686f2a3d6f17}},
{{0x000036093ce35b25, 0x0003b64d7552e9cf, 0x00071ee0fe0b8460, 0x00069d0660c969e5, 0x00032f1da046a9d9}},
{{0x000512a66d597c6a, 0x0000609a70a57551, 0x000026c08a3c464c, 0x0004531fc8ee39e1, 0x000561305f8a9ad2}}},
{{{0x0002cc28e7b0c0d5, 0x00077b60eb8a6ce4, 0x0004042985c277a6, 0x000636657b46d3eb, 0x000030a1aef2c57c}},
{{0x0004978dec92aed1, 0x000069adae7ca201, 0x00011ee923290f55, 0x00069641898d916c, 0x00000aaec53e35d4}},
{{0x0001f773003ad2aa, 0x000005642cc10f76, 0x00003b48f82cfca6, 0x0002403c10ee4329, 0x00020be9c1c24065}}},
{{{0x0000e44ae2025e60, 0x0005f97b9727041c, 0x0005683472c0ecec, 0x000188882eb1ce7c, 0x00069764c545067e}},
{{0x000387d8249673a6, 0x0005bea8dc927c2a, 0x0005bd8ed5650ef0, 0x0000ef0e3fcd40e1, 0x000750ab3361f0ac}},
{{0x00023283a2f81037, 0x000477aff97e23d1, 0x0000b8958dbcbb68, 0x0000205b97e8add6, 0x00054f96b3fb7075}}},
{{{0x0005afc616b11ecd, 0x00039f4aec8f22ef, 0x0003b39e1625d92e, 0x0005f85bd4508873, 0x00078e6839fbe85d}},
{{0x0005f20429669279, 0x00008fafae4941f5, 0x00015d83c4eb7688, 0x0001cf379eca4146, 0x0003d7fe9c52bb75}},
{{0x00032df737b8856b, 0x0000608342f14e06, 0x0003967889d74175, 0x0001211907fba550, 0x00070f268f350088}}},
{{{0x0004112070dcf355, 0x0007dcff9c22e464, 0x00054ada60e03325, 0x00025cd98eef769a, 0x000404e56c039b8c}},
{{0x00064583b1805f47, 0x00022c1baf832cd0, 0x000132c01bd4d717, 0x0004ecf4c3a75b8f, 0x0007c0d345cfad88}},
{{0x00071f4b8c78338a, 0x00062cfc16bc2b23, 0x00017cf51280d9aa, 0x0003bbae5e20a95a, 0x00020d754762aaec}}},
{{{0x0004feb135b9f543, 0x00063bd192ad93ae, 0x00044e2ea612cdf7, 0x000670f4991583ab, 0x00038b8ada8790b4}},
{{0x0007c36fc73bb758, 0x0004a6c797734bd1, 0x0000ef248ab3950e, 0x00063154c9a53ec8, 0x0002b8f1e46f3cee}},
{{0x00004a9cdf51f95d, 0x0005d963fbd596b8, 0x00022d9b68ace54a, 0x0004a98e8836c599, 0x000049aeb32ceba1}}},
{{{0x00067d3c63dcfe7e, 0x000112f0adc81aee, 0x00053df04c827165, 0x0002fe5b33b430f0, 0x00051c665e0c8d62}},
{{0x00007d0b75fc7931, 0x00016f4ce4ba754a, 0x0005ace4c03fbe49, 0x00027e0ec12a159c, 0x000795ee17530f67}},
{{0x00025b0a52ecbd81, 0x0005dc0695fce4a9, 0x0003b928c575047d, 0x00023bf3512686e5, 0x0006cd19bf49dc54}}},
{{{0x0007619052179ca3, 0x0000c16593f0afd0, 0x000265c4795c7428, 0x00031c40515d5442, 0x0007520f3db40b2e}},
{{0x0006612165afc386, 0x0001171aa36203ff, 0x0002642ea820a8aa, 0x0001f3bb7b313f10, 0x0005e01b3a7429e4}},
{{0x00050be3d39357a1, 0x0003ab33d294a7b6, 0x0004c479ba59edb3, 0x0004c30d184d326f, 0x00071092c9ccef3c}}},
{{{0x0000523f0364918c, 0x000687f56d638a7b, 0x00020796928ad013, 0x0005d38405a54f33, 0x0000ea15b03d0257}},
{{0x0003d8ac74051dcf, 0x00010ab6f543d0ad, 0x0005d0f3ac0fda90, 0x0005ef1d2573e5e4, 0x0004173a5bb7137a}},
{{0x00056e31f0f9218a, 0x0005635f88e102f8, 0x0002cbc5d969a5b8, 0x000533fbc98b347a, 0x0005fc565614a4e3}}},
{{{0x0006570dc46d7ae5, 0x00018a9f1b91e26d, 0x000436b6183f42ab, 0x000550acaa4f8198, 0x00062711c414c454}},
{{0x0002e1e67790988e, 0x0001e38b9ae44912, 0x000648fbb4075654, 0x00028df1d840cd72, 0x0003214c7409d466}},
{{0x0001827406651770, 0x0004d144f286c265, 0x00017488f0ee9281, 0x00019e6cdb5c760c, 0x0005bea94073ecb8}}},
{{{0x0005bf0912c89be4, 0x00062fadcaf38c83, 0x00025ec196b3ce2c, 0x00077655ff4f017b, 0x0003aacd5c148f61}},
{{0x0000ce63f343d2f8, 0x0001e0a87d1e368e, 0x000045edbc019eea, 0x0006979aed28d0d1, 0x0004ad0785944f1b}},
{{0x00063b34c3318301, 0x0000e0e62d04d0b1, 0x000676a233726701, 0x00029e9a042d9769, 0x0003aff0cb1d9028}}},
{{{0x0005c7eb3a20405e, 0x0005fdb5aad930f8, 0x0004a757e63b8c47, 0x00028e9492972456, 0x000110e7e86f4cd2}},
{{0x0006430bf4c53505, 0x000264c3e4507244, 0x00074c9f19a39270, 0x00073f84f799bc47, 0x0002ccf9f732bd99}},
{{0x0000d89ed603f5e4, 0x00051e1604018af8, 0x0000b8eedc4a2218, 0x00051ba98b9384d0, 0x00005c557e0b9693}}},
{{{0x0001ce311fc97e6f, 0x0006023f3fb5db1f, 0x0007b49775e8fc98, 0x0003ad70adbf5045, 0x0006e154c178fe98}},
{{0x0006bbb089c20eb0, 0x0006df41fb0b9eee, 0x00051087ed87e16f, 0x000102db5c9fa731, 0x000289fef0841861}},
{{0x00016336fed69abf, 0x0004f066b929f9ec, 0x0004e9ff9e6c5b93, 0x00018c89bc4bb2ba, 0x0006afbf642a95ca}}},
{{{0x0000de0c62f5d2c1, 0x00049601cf734fb5, 0x0006b5c38263f0f6, 0x0004623ef5b56d06, 0x0000db4b851b9503}},
{{0x00055070f913a8cc, 0x000765619eac2bbc, 0x0003ab5225f47459, 0x00076ced14ab5b48, 0x00012c093cedb801}},
{{0x00047f9308b8190f, 0x000414235c621f82, 0x00031f5ff41a5a76, 0x0006736773aab96d, 0x00033aa8799c6635}}},
{{{0x0007f51ebd085cf2, 0x00012cfa67e3f5e1, 0x0001800cf1e3d46a, 0x00054337615ff0a8, 0x000233c6f29e8e21}},
{{0x0000f588fc156cb1, 0x000363414da4f069, 0x0007296ad9b68aea, 0x0004d3711316ae43, 0x000212cd0c1c8d58}},
{{0x0004d5107f18c781, 0x00064a4fd3a51a5e, 0x0004f4cd0448bb37, 0x000671d38543151e, 0x0001db7778911914}}},
{{{0x000352397c6bc26f, 0x00018a7aa0227bbe, 0x0005e68cc1ea5f8b, 0x0006fe3e3a7a1d5f, 0x00031ad97ad26e2a}},
{{0x00014769dd701ab6, 0x00028339f1b4b667, 0x0004ab214b8ae37b, 0x00025f0aefa0b0fe, 0x0007ae2ca8a017d2}},
{{0x000017ed0920b962, 0x000187e33b53b6fd, 0x00055829907a1463, 0x000641f248e0a792, 0x0001ed1fc53a6622}}}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,8 @@
#include "ge25519.h"
/* Point doubling: r = 2*p.
   Goes through the P1P1 intermediate representation: double p
   (viewed as a P2 point), then convert the result back to P3. */
void ge25519_double(ge25519_p3 *r, const ge25519_p3 *p)
{
  ge25519_p1p1 tmp;

  ge25519_dbl_p1p1(&tmp, (ge25519_p2 *)p);
  ge25519_p1p1_to_p3(r, &tmp);
}

View file

@ -0,0 +1,97 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
/* Sliding-window width for the variable point p1:
   window size 5 -> 1<<(5-2) = 8 precomputed table entries. */
#define S1_SWINDOWSIZE 5
#define PRE1_SIZE (1<<(S1_SWINDOWSIZE-2))
/* Sliding-window width for the fixed point:
   window size 7 -> 1<<(7-2) = 32 entries, taken from the table below. */
#define S2_SWINDOWSIZE 7
#define PRE2_SIZE (1<<(S2_SWINDOWSIZE-2))
/* Precomputed base-point multiples in Niels representation.
   NOTE(review): not declared static const — presumably external linkage
   is relied on elsewhere; verify before tightening. */
ge25519_niels pre2[PRE2_SIZE] = {
#include "ge25519_base_slide_multiples.data"
};
/* Curve constant used when building pniels entries; the name suggests
   2*d mod 2^255-19 in five limbs — TODO confirm against ecd. */
static const fe25519 ec2d = {{1859910466990425, 932731440258426, 1072319116312658, 1815898335770999, 633789495995903}};
/* Set r to the neutral element: (x, y, z, t) = (0, 1, 1, 0). */
static void setneutral(ge25519 *r)
{
  fe25519_setint(&r->t, 0);
  fe25519_setint(&r->z, 1);
  fe25519_setint(&r->y, 1);
  fe25519_setint(&r->x, 0);
}
/* computes [s1]p1 + [s2]p2 with two interleaved sliding windows.
   Variable-time: branches depend on the scalar digits, so s1/s2 must
   not be secret. */
void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p1, const sc25519 *s1, const sc25519 *s2)
{
signed char slide1[256], slide2[256];
ge25519_pniels pre1[PRE1_SIZE], neg;
ge25519_p3 d1;
ge25519_p1p1 t;
ge25519_niels nneg;
fe25519 d;
int i;
/* Recode both scalars into signed sliding-window digit form. */
sc25519_slide(slide1, s1, S1_SWINDOWSIZE);
sc25519_slide(slide2, s2, S2_SWINDOWSIZE);
/* precomputation */
pre1[0] = *(ge25519_pniels *)p1;
/* d1 = 2*p1, the step between successive table entries. */
ge25519_dbl_p1p1(&t,(ge25519_p2 *)pre1); ge25519_p1p1_to_p3(&d1, &t);
/* Convert pre[0] to projective Niels representation */
/* i.e. (x, y) -> (y - x, y + x), and scale t by ec2d. */
d = pre1[0].ysubx;
fe25519_sub(&pre1[0].ysubx, &pre1[0].xaddy, &pre1[0].ysubx);
fe25519_add(&pre1[0].xaddy, &pre1[0].xaddy, &d);
fe25519_mul(&pre1[0].t2d, &pre1[0].t2d, &ec2d);
/* pre1[i+1] = pre1[i] + d1. */
for(i=0;i<PRE1_SIZE-1;i++)
{
ge25519_pnielsadd_p1p1(&t, &d1, &pre1[i]); ge25519_p1p1_to_pniels(&pre1[i+1], &t);
}
setneutral(r);
/* Skip the run of leading positions where both digit strings are zero,
   jumping straight into the main loop at the first nonzero digit
   (avoids pointless doublings of the neutral element). */
for (i = 255;i >= 0;--i) {
if (slide1[i] || slide2[i]) goto firstbit;
}
for(;i>=0;i--)
{
firstbit:
ge25519_dbl_p1p1(&t, (ge25519_p2 *)r);
/* Add (digit > 0) or subtract (digit < 0) the pre1 entry for s1. */
if(slide1[i]>0)
{
ge25519_p1p1_to_p3(r, &t);
ge25519_pnielsadd_p1p1(&t, r, &pre1[slide1[i]/2]);
}
else if(slide1[i]<0)
{
ge25519_p1p1_to_p3(r, &t);
/* Negate the table entry: swap ysubx/xaddy and negate t2d. */
neg = pre1[-slide1[i]/2];
d = neg.ysubx;
neg.ysubx = neg.xaddy;
neg.xaddy = d;
fe25519_neg(&neg.t2d, &neg.t2d);
ge25519_pnielsadd_p1p1(&t, r, &neg);
}
/* Same for s2, using the fixed table pre2. */
if(slide2[i]>0)
{
ge25519_p1p1_to_p3(r, &t);
ge25519_nielsadd_p1p1(&t, r, &pre2[slide2[i]/2]);
}
else if(slide2[i]<0)
{
ge25519_p1p1_to_p3(r, &t);
nneg = pre2[-slide2[i]/2];
d = nneg.ysubx;
nneg.ysubx = nneg.xaddy;
nneg.xaddy = d;
fe25519_neg(&nneg.t2d, &nneg.t2d);
ge25519_nielsadd_p1p1(&t, r, &nneg);
}
ge25519_p1p1_to_p2((ge25519_p2 *)r, &t);
}
}

View file

@ -0,0 +1,9 @@
#include "fe25519.h"
#include "ge25519.h"
/* Return 1 iff p is the neutral element: x == 0 and y == z
   (i.e. affine y == 1).  Variable-time comparisons — not for
   secret data. */
int ge25519_isneutral_vartime(const ge25519_p3 *p)
{
  return fe25519_iszero_vartime(&p->x)
      && fe25519_iseq_vartime(&p->y, &p->z);
}

View file

@ -0,0 +1,102 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
#include "index_heap.h"
/* Load the neutral group element into r: (x, y, z, t) = (0, 1, 1, 0). */
static void setneutral(ge25519 *r)
{
  fe25519_setint(&r->x, 0);
  fe25519_setint(&r->t, 0);
  fe25519_setint(&r->y, 1);
  fe25519_setint(&r->z, 1);
}
/* r = [s]p for a scalar known to fit in the two low 64-bit limbs
   s->v[0], s->v[1] (the tail step of the Bos-Coster loop below).
   Plain left-to-right double-and-add; variable-time. */
static void ge25519_scalarmult_vartime_2limbs(ge25519 *r, ge25519 *p, sc25519 *s)
{
if (s->v[1] == 0 && s->v[0] == 1) /* This will happen most of the time after Bos-Coster */
*r = *p;
else if (s->v[1] == 0 && s->v[0] == 0) /* This won't ever happen, except for all scalars == 0 in Bos-Coster */
setneutral(r);
else
{
ge25519 d;
unsigned long long mask = (1ULL << 63);
int i = 1;
/* Find the highest set bit, first in the high limb v[1]... */
while(!(mask & s->v[1]) && mask != 0)
mask >>= 1;
if(mask == 0)
{
/* ...and if v[1] is zero, in the low limb v[0]. */
mask = (1ULL << 63);
i = 0;
while(!(mask & s->v[0]) && mask != 0)
mask >>= 1;
}
/* Start from the top set bit (d = p) and scan the rest downward. */
d = *p;
mask >>= 1;
for(;mask != 0;mask >>= 1)
{
ge25519_double(&d,&d);
if(s->v[i] & mask)
ge25519_add(&d,&d,p);
}
/* If we started in the high limb, continue through all of v[0]. */
if(i==1)
{
mask = (1ULL << 63);
for(;mask != 0;mask >>= 1)
{
ge25519_double(&d,&d);
if(s->v[0] & mask)
ge25519_add(&d,&d,p);
}
}
*r = d;
}
}
/* caller's responsibility to ensure npoints >= 5 */
/* r = sum_i [s[i]]p[i], computed with the Bos-Coster method:
   repeatedly subtract the second-largest scalar from the largest and
   fold the corresponding points, until one scalar remains.  A heap
   over `pos` tracks the two largest scalars; the four phases below
   use progressively cheaper heap-repair routines as the scalars
   shrink to 3, 2 and finally 1 limb.  Destroys s[] and p[].
   Variable-time. */
void ge25519_multi_scalarmult_vartime(ge25519_p3 *r, ge25519_p3 *p, sc25519 *s, const unsigned long long npoints)
{
unsigned long long pos[npoints];
unsigned long long hlen=((npoints+1)/2)|1;
unsigned long long max1, max2,i;
heap_init(pos, hlen, s);
/* Phase 1: while the largest scalar still uses limb v[3]. */
for(i=0;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if((s[max1].v[3] == 0) || (sc25519_iszero_vartime(&s[max2]))) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced(pos, hlen, s);
}
/* Phase 2: scalars fit in 3 limbs. */
for(;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if((s[max1].v[2] == 0) || (sc25519_iszero_vartime(&s[max2]))) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced_3limbs(pos, hlen, s);
}
/* We know that (npoints-1)/2 scalars are only 128-bit scalars */
/* so the heap can now be grown to cover all npoints entries. */
heap_extend(pos, hlen, npoints, s);
hlen = npoints;
/* Phase 3: scalars fit in 2 limbs. */
for(;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if((s[max1].v[1] == 0) || (sc25519_iszero_vartime(&s[max2]))) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced_2limbs(pos, hlen, s);
}
/* Phase 4: run until only one nonzero scalar remains. */
for(;;i++)
{
heap_get2max(pos, &max1, &max2, s);
if(sc25519_iszero_vartime(&s[max2])) break;
sc25519_sub_nored(&s[max1],&s[max1],&s[max2]);
ge25519_add(&p[max2],&p[max2],&p[max1]);
heap_rootreplaced_1limb(pos, hlen, s);
}
/* Finish with a plain scalar multiplication by the surviving scalar. */
ge25519_scalarmult_vartime_2limbs(r, &p[max1], &s[max1]);
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,13 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
/* Encode the point p into the standard 32-byte compressed form:
 * the affine y coordinate, with the parity (sign) of affine x stored
 * in the most significant bit of the last byte. */
void ge25519_pack(unsigned char r[32], const ge25519_p3 *p)
{
fe25519 zinv, ax, ay;
/* One inversion converts projective (x:y:z) to affine (x/z, y/z). */
fe25519_invert(&zinv, &p->z);
fe25519_mul(&ay, &p->y, &zinv);
fe25519_mul(&ax, &p->x, &zinv);
fe25519_pack(r, &ay);
/* Fold the sign of x into the top bit of the encoding. */
r[31] ^= fe25519_getparity(&ax) << 7;
}

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,50 @@
#include "fe25519.h"
#include "sc25519.h"
#include "ge25519.h"
/* Multiples of the base point in Niels' representation */
static const ge25519_niels ge25519_base_multiples_niels[] = {
#include "ge25519_base_niels_smalltables.data"
};
/* d */
static const fe25519 ecd = {{929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575}};
/* Fixed-base scalar multiplication r = s*B using a signed radix-16
 * (4-bit window) decomposition of s and a precomputed table of base
 * multiples in Niels form: odd-indexed windows are accumulated first,
 * the partial sum is doubled four times, then the even-indexed windows
 * are added. */
void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s)
{
signed char b[64]; /* s split into 64 signed 4-bit windows */
int i;
ge25519_niels t;
fe25519 d;
sc25519_window4(b,s);
ge25519_p1p1 tp1p1;
/* Load the table entry for the first odd window directly into r,
 * still in Niels (y-x, y+x, 2dxy) form. */
choose_t((ge25519_niels *)r, 0, (signed long long) b[1], ge25519_base_multiples_niels);
/* Unpack the Niels triple into extended coordinates: recover x and y
 * from (y-x, y+x), move 2dxy into t, and set z = 2 to compensate for
 * the doubled representation.  NOTE(review): the z=2 scaling matches
 * the t2d*ecd correction applied to window 0 below -- confirm against
 * the SUPERCOP amd64-51-30k sources. */
fe25519_sub(&d, &r->y, &r->x);
fe25519_add(&r->y, &r->y, &r->x);
r->x = d;
r->t = r->z;
fe25519_setint(&r->z,2);
/* Accumulate the remaining odd-indexed windows. */
for(i=3;i<64;i+=2)
{
choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels);
ge25519_nielsadd2(r, &t);
}
/* Multiply the odd-window sum by 16 via four doublings. */
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1);
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1);
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1);
ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r);
ge25519_p1p1_to_p3(r, &tp1p1);
/* Window 0: rescale the table entry's t2d field by d before adding. */
choose_t(&t, (unsigned long long) 0, (signed long long) b[0], ge25519_base_multiples_niels);
fe25519_mul(&t.t2d, &t.t2d, &ecd);
ge25519_nielsadd2(r, &t);
/* Add the remaining even-indexed windows. */
for(i=2;i<64;i+=2)
{
choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels);
ge25519_nielsadd2(r, &t);
}
}

View file

@@ -0,0 +1,60 @@
#include "fe25519.h"
#include "ge25519.h"
/* d */
static const fe25519 ecd = {{929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575}};
/* sqrt(-1) */
static const fe25519 sqrtm1 = {{1718705420411056, 234908883556509, 2233514472574048, 2117202627021982, 765476049583133}};
/* return 0 on success, -1 otherwise */
/* Decompress a 32-byte encoded point into extended coordinates, with
 * the x coordinate NEGATED relative to the stored sign bit (step 5
 * keeps the root whose parity differs from the encoded bit -- hence
 * "unpackneg").  Recovers x from the curve equation
 * x^2 = (y^2-1)/(d*y^2+1) via a single exponentiation (the fused
 * inversion/square-root trick), then checks the candidate root.
 * Variable time: only safe on public inputs. */
int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32])
{
fe25519 t, chk, num, den, den2, den4, den6;
unsigned char par = p[31] >> 7; /* encoded sign bit of x */
fe25519_setint(&r->z,1);
fe25519_unpack(&r->y, p);
fe25519_square(&num, &r->y); /* x = y^2 */
fe25519_mul(&den, &num, &ecd); /* den = dy^2 */
fe25519_sub(&num, &num, &r->z); /* x = y^2-1 */
fe25519_add(&den, &r->z, &den); /* den = dy^2+1 */
/* Computation of sqrt(num/den)
1.: computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8)
*/
fe25519_square(&den2, &den);
fe25519_square(&den4, &den2);
fe25519_mul(&den6, &den4, &den2);
fe25519_mul(&t, &den6, &num);
fe25519_mul(&t, &t, &den);
fe25519_pow2523(&t, &t);
/* 2. computation of r->x = t * num * den^3
*/
fe25519_mul(&t, &t, &num);
fe25519_mul(&t, &t, &den);
fe25519_mul(&t, &t, &den);
fe25519_mul(&r->x, &t, &den);
/* 3. Check whether sqrt computation gave correct result, multiply by sqrt(-1) if not:
*/
fe25519_square(&chk, &r->x);
fe25519_mul(&chk, &chk, &den);
if (!fe25519_iseq_vartime(&chk, &num))
fe25519_mul(&r->x, &r->x, &sqrtm1);
/* 4. Now we have one of the two square roots, except if input was not a square
*/
fe25519_square(&chk, &r->x);
fe25519_mul(&chk, &chk, &den);
if (!fe25519_iseq_vartime(&chk, &num))
return -1; /* not on the curve: no square root exists */
/* 5. Choose the desired square root according to parity:
*/
if(fe25519_getparity(&r->x) != (1-par))
fe25519_neg(&r->x, &r->x);
fe25519_mul(&r->t, &r->x, &r->y); /* complete extended coordinates: t = x*y */
return 0;
}

View file

@@ -0,0 +1,476 @@
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#8),>c1=int64#10
# asm 2: movq 8(<spc=%r10),>c1=%r12
movq 8(%r10),%r12
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#8),>c2=int64#11
# asm 2: movq 16(<spc=%r10),>c2=%r13
movq 16(%r10),%r13
# qhasm: c3 = *(uint64 *)(spc + 24)
# asm 1: movq 24(<spc=int64#8),>c3=int64#12
# asm 2: movq 24(<spc=%r10),>c3=%r14
movq 24(%r10),%r14
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry
# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10
# asm 2: sbbq 8(<sprc=%rax),<c1=%r12
sbbq 8(%rax),%r12
# qhasm: carry? c2 -= *(uint64 *)(sprc + 16) - carry
# asm 1: sbbq 16(<sprc=int64#7),<c2=int64#11
# asm 2: sbbq 16(<sprc=%rax),<c2=%r13
sbbq 16(%rax),%r13
# qhasm: carry? c3 -= *(uint64 *)(sprc + 24) - carry
# asm 1: sbbq 24(<sprc=int64#7),<c3=int64#12
# asm 2: sbbq 24(<sprc=%rax),<c3=%r14
sbbq 24(%rax),%r14
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#6),>c1=int64#8
# asm 2: movq 8(<spc=%r9),>c1=%r10
movq 8(%r9),%r10
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#6),>c2=int64#9
# asm 2: movq 16(<spc=%r9),>c2=%r11
movq 16(%r9),%r11
# qhasm: c3 = *(uint64 *)(spc + 24)
# asm 1: movq 24(<spc=int64#6),>c3=int64#10
# asm 2: movq 24(<spc=%r9),>c3=%r12
movq 24(%r9),%r12
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry
# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8
# asm 2: sbbq 8(<spp=%r8),<c1=%r10
sbbq 8(%r8),%r10
# qhasm: carry? c2 -= *(uint64 *)(spp + 16) - carry
# asm 1: sbbq 16(<spp=int64#5),<c2=int64#9
# asm 2: sbbq 16(<spp=%r8),<c2=%r11
sbbq 16(%r8),%r11
# qhasm: carry? c3 -= *(uint64 *)(spp + 24) - carry
# asm 1: sbbq 24(<spp=int64#5),<c3=int64#10
# asm 2: sbbq 24(<spp=%r8),<c3=%r12
sbbq 24(%r8),%r12
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@@ -0,0 +1,416 @@
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@@ -0,0 +1,436 @@
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#8),>c1=int64#10
# asm 2: movq 8(<spc=%r10),>c1=%r12
movq 8(%r10),%r12
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry
# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10
# asm 2: sbbq 8(<sprc=%rax),<c1=%r12
sbbq 8(%rax),%r12
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#6),>c1=int64#8
# asm 2: movq 8(<spc=%r9),>c1=%r10
movq 8(%r9),%r10
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry
# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8
# asm 2: sbbq 8(<spp=%r8),<c1=%r10
sbbq 8(%r8),%r10
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,456 @@
# crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs(hp, hlen, sp)
#
# Restore the max-heap invariant after the root entry of the index heap at
# hp (hlen 64-bit indices) has been replaced.  Keys are compared as 3-limb
# (192-bit) little-endian values located at sp + 32*index ("shl $5" below).
# Strategy: sift the root down along the larger child all the way to a
# leaf, then sift the moved element back up to its resting place.
# Generated by qhasm: every instruction carries its qhasm source line and
# register assignment in the comments immediately above it.
# NOTE(review): register comments show the SysV AMD64 convention
# (hp=%rdi, hlen=%rsi, sp=%rdx) — confirm before porting.
# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs:
crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs:
# Prologue: align the stack to 32 bytes and reserve 64 bytes for the
# seven callee-saved registers; %r11 remembers the adjustment.
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx
# Phase 1 — sift down: from parent pp, form child indices 2*pp+1 (pc)
# and 2*pp+2 (prc); while prc < hlen, pick the larger child via a
# 3-limb compare and swap it with the parent, then descend.
# qhasm: siftdownloop:
._siftdownloop:
# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8
# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8
# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9
# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8
# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9
# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax
# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax
# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10
# 3-limb compare of the two children: borrow out of the sbb chain
# means key(pc) < key(prc), so the right child is the larger one.
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#8),>c1=int64#10
# asm 2: movq 8(<spc=%r10),>c1=%r12
movq 8(%r10),%r12
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#8),>c2=int64#11
# asm 2: movq 16(<spc=%r10),>c2=%r13
movq 16(%r10),%r13
# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11
# qhasm: carry? c1 -= *(uint64 *)(sprc + 8) - carry
# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10
# asm 2: sbbq 8(<sprc=%rax),<c1=%r12
sbbq 8(%rax),%r12
# qhasm: carry? c2 -= *(uint64 *)(sprc + 16) - carry
# asm 1: sbbq 16(<sprc=int64#7),<c2=int64#11
# asm 2: sbbq 16(<sprc=%rax),<c2=%r13
sbbq 16(%rax),%r13
# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9
# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10
# Convert the winning child's key pointer back into a heap index
# (undo the "<<5; += sp" addressing) before swapping it with pp.
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)
# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump
# qhasm: goto siftdownloop
jmp ._siftdownloop
# Phase 2 — sift up: pc is the leaf the element landed on; compare it
# with its parent pp = (pc-1)/2 and swap upward, stopping at the root
# or as soon as the parent's 3-limb key exceeds the element's.
# qhasm: siftuploop:
._siftuploop:
# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi
# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx
# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx
# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump
# qhasm: goto end if !unsigned>
jbe ._end
# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8
# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9
# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8
# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9
# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9
# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8
# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax
# qhasm: c1 = *(uint64 *)(spc + 8)
# asm 1: movq 8(<spc=int64#6),>c1=int64#8
# asm 2: movq 8(<spc=%r9),>c1=%r10
movq 8(%r9),%r10
# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq 16(<spc=int64#6),>c2=int64#9
# asm 2: movq 16(<spc=%r9),>c2=%r11
movq 16(%r9),%r11
# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# qhasm: carry? c1 -= *(uint64 *)(spp + 8) - carry
# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8
# asm 2: sbbq 8(<spp=%r8),<c1=%r10
sbbq 8(%r8),%r10
# qhasm: carry? c2 -= *(uint64 *)(spp + 16) - carry
# asm 1: sbbq 16(<spp=int64#5),<c2=int64#9
# asm 2: sbbq 16(<spp=%r8),<c2=%r11
sbbq 16(%r8),%r11
# Borrow here means key(child) < key(parent): heap property holds.
# comment:fp stack unchanged by jump
# qhasm: goto end if carry
jc ._end
# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9
# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9
# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8
# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8
# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)
# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump
# qhasm: goto siftuploop
jmp ._siftuploop
# Epilogue: restore the seven callee-saved registers and undo the
# alignment frame (%r11 reloaded from slot 0 holds the adjustment).
# qhasm: end:
._end:
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
# (standard qhasm "leave" pattern; this function returns no value)
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,13 @@
#include "crypto_hash_sha512.h"
#include "hram.h"
/* Compute hram = SHA-512( sm[0..31] || pk[0..31] || sm[64..smlen-1] ).
 * The hash input is assembled in the caller-supplied scratch buffer
 * `playground`, which must hold at least smlen bytes. */
void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen)
{
  unsigned long long i;

  /* playground = R (first 32 bytes of sm) || pk || remainder of sm */
  for (i = 0; i < 32; ++i)
    playground[i] = sm[i];
  for (i = 0; i < 32; ++i)
    playground[32 + i] = pk[i];
  for (i = 64; i < smlen; ++i)
    playground[i] = sm[i];

  crypto_hash_sha512(hram, playground, smlen);
}

View file

@ -0,0 +1,8 @@
#ifndef HRAM_H
#define HRAM_H
/* Namespace the shared helper to the amd64-51-30k batch implementation. */
#define get_hram crypto_sign_ed25519_amd64_51_30k_batch_get_hram
/* Compute hram = SHA-512(sm[0..31] || pk || sm[64..smlen-1]); `playground`
 * is caller-provided scratch of at least smlen bytes (see hram.c). */
extern void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen);
#endif

View file

@ -0,0 +1,5 @@
Daniel J. Bernstein
Niels Duif
Tanja Lange
lead: Peter Schwabe
Bo-Yin Yang

View file

@ -0,0 +1,58 @@
#include "sc25519.h"
#include "index_heap.h"
/* caller's responsibility to ensure hlen>=3 */
/* Build a max-heap of the indices 0..hlen-1 in h, ordered by the scalars
 * they index.  Index 0 seeds the heap; every further index is inserted
 * with heap_push.  Note there is no explicit i++: heap_push receives &i
 * as its "current length" pointer and increments it after each insert. */
void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars)
{
  h[0] = 0;
  unsigned long long i=1;
  while(i<hlen)
    heap_push(h, &i, i, scalars);  /* heap_push does (*hlen)++, i.e. i++ */
}
/* Grow an existing heap from oldlen to newlen entries by inserting the
 * indices oldlen..newlen-1.  The for-loop has an empty increment on
 * purpose: heap_push advances i itself through the &i length pointer. */
void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars)
{
  unsigned long long i;
  for (i = oldlen; i < newlen; )
    heap_push(h, &i, i, scalars);
}
/* Append `elem` at position *hlen and bubble it up until its parent's
 * scalar is no smaller (max-heap by sc25519_lt), then increment *hlen. */
void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars)
{
  /* Move up towards the root */
  /* XXX: Check size of hlen, whether cast to signed value is ok */
  signed long long pos = *hlen;
  signed long long ppos = (pos-1)/2;  /* parent of pos */
  unsigned long long t;
  h[*hlen] = elem;
  while(pos > 0)
  {
    /* if(sc25519_lt_vartime(&scalars[h[ppos]], &scalars[h[pos]])) */
    /* parent's key < child's key: swap them and continue climbing */
    if(sc25519_lt(&scalars[h[ppos]], &scalars[h[pos]]))
    {
      t = h[ppos];
      h[ppos] = h[pos];
      h[pos] = t;
      pos = ppos;
      ppos = (pos-1)/2;
    }
    else break;
  }
  (*hlen)++;
}
/* Put the largest value in the heap in max1, the second largest in max2 */
/* Root h[0] is the maximum of the max-heap; the runner-up must be one of
 * its two children h[1], h[2].
 * NOTE(review): reads h[2] unconditionally — assumes at least 3 entries
 * (cf. the "caller's responsibility to ensure hlen>=3" note at heap_init). */
void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars)
{
  *max1 = h[0];
  *max2 = h[1];
  if(sc25519_lt(&scalars[h[1]],&scalars[h[2]]))
    *max2 = h[2];
}
/* After the root has been replaced, restore heap property */
/* extern void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
*/
/* extern void heap_rootreplaced_shortscalars(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
*/

View file

@ -0,0 +1,31 @@
#ifndef INDEX_HEAP_H
#define INDEX_HEAP_H
#include "sc25519.h"
/* Max-heap of 64-bit indices into an array of sc25519 scalars, ordered by
 * the scalars they index (comparator: sc25519_lt — see index_heap.c).
 * All names are prefixed into the amd64-51-30k batch namespace. */
#define heap_init crypto_sign_ed25519_amd64_51_30k_batch_heap_init
#define heap_extend crypto_sign_ed25519_amd64_51_30k_batch_heap_extend
#define heap_pop crypto_sign_ed25519_amd64_51_30k_batch_heap_pop
#define heap_push crypto_sign_ed25519_amd64_51_30k_batch_heap_push
#define heap_get2max crypto_sign_ed25519_amd64_51_30k_batch_heap_get2max
#define heap_rootreplaced crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced
#define heap_rootreplaced_3limbs crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs
#define heap_rootreplaced_2limbs crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs
#define heap_rootreplaced_1limb crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb
/* Build a heap of indices 0..hlen-1 (caller must ensure hlen >= 3). */
void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
/* Insert indices oldlen..newlen-1 into an existing heap. */
void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars);
/* NOTE(review): heap_pop's definition is not in view; presumably removes
 * and returns the root index, decrementing *hlen — confirm in index_heap.c. */
unsigned long long heap_pop(unsigned long long *h, unsigned long long *hlen, sc25519 *scalars);
/* Append elem, restore the heap property, increment *hlen. */
void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars);
/* Return the largest (h[0]) and second-largest index; needs >= 3 entries. */
void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars);
/* Assembly routines restoring the heap property after the root has been
 * replaced; the _Nlimb(s) variants compare only the low N 64-bit limbs. */
void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
void heap_rootreplaced_3limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
void heap_rootreplaced_2limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
void heap_rootreplaced_1limb(unsigned long long *h, unsigned long long hlen, sc25519 *scalars);
#endif

View file

@ -0,0 +1,46 @@
#include <string.h>
#include "crypto_sign.h"
#include "crypto_hash_sha512.h"
#include "randombytes.h"
#include "ge25519.h"
/* Expand a 32-byte seed into the 64-byte secret key sk = SHA-512(seed),
 * clamping the scalar half (first 32 bytes) as ed25519 requires.
 * Always returns 0. */
int crypto_sign_seckey_expand(unsigned char *sk,const unsigned char *seed)
{
  crypto_hash_sha512(sk,seed,32);
  sk[0] &= 248;   /* clear low 3 bits: scalar becomes a multiple of 8 */
  sk[31] &= 63;   /* clear the top two bits of the last byte */
  sk[31] |= 64;   /* set bit 254, fixing the scalar's bit length */
  return 0;
}
/* Generate a fresh 64-byte secret key from 32 random bytes.
 * Returns 0 on success, -1 if the random generator reports failure.
 * NOTE(review): the local seed is not wiped before returning — consider
 * zeroizing it to limit key material left on the stack. */
int crypto_sign_seckey(unsigned char *sk)
{
  unsigned char seed[32];

  if (randombytes(seed,32) < 0)
    return -1;

  crypto_sign_seckey_expand(sk,seed);

  return 0;
}
/* Derive the 32-byte public key from the (clamped) scalar half of sk:
 * pk = pack(scalar * B), where B is the ed25519 base point.
 * Always returns 0. */
int crypto_sign_pubkey(unsigned char *pk,const unsigned char *sk)
{
  sc25519 scsk;     /* secret scalar */
  ge25519_p3 gepk;  /* public-key group element */

  sc25519_from32bytes(&scsk,sk);
  ge25519_scalarmult_base(&gepk,&scsk);
  ge25519_pack(pk,&gepk);
  return 0;
}
/* Generate an (sk, pk) keypair.
 * Returns 0 on success, -1 if secret-key generation fails (RNG failure).
 * Fix: the original ignored crypto_sign_seckey's -1 and would go on to
 * derive and return a public key from an uninitialized seed buffer. */
int crypto_sign_keypair(unsigned char *pk,unsigned char *sk)
{
  if (crypto_sign_seckey(sk) != 0)
    return -1;
  crypto_sign_pubkey(pk,sk);
  return 0;
}

View file

@ -0,0 +1,49 @@
#include <string.h>
#include "crypto_sign.h"
#include "crypto_verify_32.h"
#include "crypto_hash_sha512.h"
#include "ge25519.h"
/* Verify a signed message sm = R (32 bytes) || S (32 bytes) || message.
 * On success: copies the message into m, sets *mlen = smlen - 64, returns 0.
 * On failure: zeroes m, sets *mlen = (unsigned long long)-1, returns -1.
 * NOTE(review): m is used as hash scratch and must hold smlen bytes. */
int crypto_sign_open(
    unsigned char *m,unsigned long long *mlen,
    const unsigned char *sm,unsigned long long smlen,
    const unsigned char *pk
    )
{
  unsigned char pkcopy[32];
  unsigned char rcopy[32];
  unsigned char hram[64];
  unsigned char rcheck[32];
  ge25519 get1, get2;
  sc25519 schram, scs;

  if (smlen < 64) goto badsig;      /* too short to contain R || S */
  if (sm[63] & 224) goto badsig;    /* top 3 bits of S's last byte must be clear */
  /* decode pk; "unpackneg" yields the negated public-key point */
  if (ge25519_unpackneg_vartime(&get1,pk)) goto badsig;

  memmove(pkcopy,pk,32);
  memmove(rcopy,sm,32);             /* save R before m is reused as scratch */

  sc25519_from32bytes(&scs, sm+32); /* S as a scalar */

  /* Build R || pk || message in m and hash: hram = H(R, A, M). */
  memmove(m,sm,smlen);
  memmove(m + 32,pkcopy,32);
  crypto_hash_sha512(hram,m,smlen);

  sc25519_from64bytes(&schram, hram);

  /* NOTE(review): presumably rcheck = pack([hram](-A) + [S]B), which
   * equals R exactly for a valid signature — confirm the scalar/point
   * pairing against ge25519_double_scalarmult_vartime's contract. */
  ge25519_double_scalarmult_vartime(&get2, &get1, &schram, &scs);
  ge25519_pack(rcheck, &get2);

  if (crypto_verify_32(rcopy,rcheck) == 0) {
    /* Valid: slide the message down over the signature and clear the tail. */
    memmove(m,m + 64,smlen - 64);
    memset(m + smlen - 64,0,64);
    *mlen = smlen - 64;
    return 0;
  }

badsig:
  *mlen = (unsigned long long) -1;
  memset(m,0,smlen);
  return -1;
}

View file

@ -0,0 +1,3 @@
#include <sodium/randombytes.h>
/* Adapter: the SUPERCOP code checks `randombytes(buf,len) < 0`, so wrap
 * libsodium's randombytes() in a comma expression that always yields 0.
 * (The inner `randombytes` is not re-expanded: self-referential macro.) */
#define randombytes(b,n) \
(randombytes(b,n), 0)

View file

@ -0,0 +1,69 @@
#ifndef SC25519_H
#define SC25519_H
#define sc25519 crypto_sign_ed25519_amd64_51_30k_batch_sc25519
#define shortsc25519 crypto_sign_ed25519_amd64_51_30k_batch_shortsc25519
#define sc25519_from32bytes crypto_sign_ed25519_amd64_51_30k_batch_sc25519_from32bytes
#define shortsc25519_from16bytes crypto_sign_ed25519_amd64_51_30k_batch_shortsc25519_from16bytes
#define sc25519_from64bytes crypto_sign_ed25519_amd64_51_30k_batch_sc25519_from64bytes
#define sc25519_from_shortsc crypto_sign_ed25519_amd64_51_30k_batch_sc25519_from_shortsc
#define sc25519_to32bytes crypto_sign_ed25519_amd64_51_30k_batch_sc25519_to32bytes
#define sc25519_iszero_vartime crypto_sign_ed25519_amd64_51_30k_batch_sc25519_iszero_vartime
#define sc25519_isshort_vartime crypto_sign_ed25519_amd64_51_30k_batch_sc25519_isshort_vartime
#define sc25519_lt crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
#define sc25519_add crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
#define sc25519_sub_nored crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
#define sc25519_mul crypto_sign_ed25519_amd64_51_30k_batch_sc25519_mul
#define sc25519_mul_shortsc crypto_sign_ed25519_amd64_51_30k_batch_sc25519_mul_shortsc
#define sc25519_window4 crypto_sign_ed25519_amd64_51_30k_batch_sc25519_window4
#define sc25519_window5 crypto_sign_ed25519_amd64_51_30k_batch_sc25519_window5
#define sc25519_slide crypto_sign_ed25519_amd64_51_30k_batch_sc25519_slide
#define sc25519_2interleave2 crypto_sign_ed25519_amd64_51_30k_batch_sc25519_2interleave2
#define sc25519_barrett crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett
typedef struct
{
unsigned long long v[4];
}
sc25519;
typedef struct
{
unsigned long long v[2];
}
shortsc25519;
void sc25519_from32bytes(sc25519 *r, const unsigned char x[32]);
void sc25519_from64bytes(sc25519 *r, const unsigned char x[64]);
void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x);
void sc25519_to32bytes(unsigned char r[32], const sc25519 *x);
int sc25519_iszero_vartime(const sc25519 *x);
int sc25519_lt(const sc25519 *x, const sc25519 *y);
void sc25519_add(sc25519 *r, const sc25519 *x, const sc25519 *y);
void sc25519_sub_nored(sc25519 *r, const sc25519 *x, const sc25519 *y);
void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y);
void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y);
/* Convert s into a representation of the form \sum_{i=0}^{63}r[i]2^(4*i)
* with r[i] in {-8,...,7}
*/
void sc25519_window4(signed char r[64], const sc25519 *s);
void sc25519_window5(signed char r[51], const sc25519 *s);
void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize);
void sc25519_2interleave2(unsigned char r[127], const sc25519 *s1, const sc25519 *s2);
void sc25519_barrett(sc25519 *r, unsigned long long x[8]);
#endif

View file

@ -0,0 +1,232 @@
# crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add(rp, xp, yp)
#
# 4-limb scalar addition modulo the group order: *rp = (*xp + *yp) mod n.
# A full 256-bit add with carry chain, then ONE trial subtraction of the
# order; the reduced value is selected branch-free with cmovae, keeping
# the routine constant-time.
# Generated by qhasm; each instruction carries its qhasm source line.
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_add:
mov %rsp,%r11
and $31,%r11
add $32,%r11
sub %r11,%rsp
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#1
# asm 2: movq <caller4=%r14,>caller4_stack=0(%rsp)
movq %r14,0(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#2
# asm 2: movq <caller5=%r15,>caller5_stack=8(%rsp)
movq %r15,8(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#3
# asm 2: movq <caller6=%rbx,>caller6_stack=16(%rsp)
movq %rbx,16(%rsp)
# Load the four limbs of x.
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
# 256-bit addition r += y with carry propagation.
# qhasm: carry? r0 += *(uint64 *)(yp + 0)
# asm 1: addq 0(<yp=int64#3),<r0=int64#4
# asm 2: addq 0(<yp=%rdx),<r0=%rcx
addq 0(%rdx),%rcx
# qhasm: carry? r1 += *(uint64 *)(yp + 8) + carry
# asm 1: adcq 8(<yp=int64#3),<r1=int64#5
# asm 2: adcq 8(<yp=%rdx),<r1=%r8
adcq 8(%rdx),%r8
# qhasm: carry? r2 += *(uint64 *)(yp + 16) + carry
# asm 1: adcq 16(<yp=int64#3),<r2=int64#6
# asm 2: adcq 16(<yp=%rdx),<r2=%r9
adcq 16(%rdx),%r9
# qhasm: r3 += *(uint64 *)(yp + 24) + carry
# asm 1: adcq 24(<yp=int64#3),<r3=int64#2
# asm 2: adcq 24(<yp=%rdx),<r3=%rsi
adcq 24(%rdx),%rsi
# Trial-subtract the group order from a copy (t) of the sum; if the
# subtraction does not borrow (sum >= order), the reduced copy wins.
# qhasm: t0 = r0
# asm 1: mov <r0=int64#4,>t0=int64#3
# asm 2: mov <r0=%rcx,>t0=%rdx
mov %rcx,%rdx
# qhasm: t1 = r1
# asm 1: mov <r1=int64#5,>t1=int64#7
# asm 2: mov <r1=%r8,>t1=%rax
mov %r8,%rax
# qhasm: t2 = r2
# asm 1: mov <r2=int64#6,>t2=int64#8
# asm 2: mov <r2=%r9,>t2=%r10
mov %r9,%r10
# qhasm: t3 = r3
# asm 1: mov <r3=int64#2,>t3=int64#12
# asm 2: mov <r3=%rsi,>t3=%r14
mov %rsi,%r14
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0
# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=int64#3
# asm 2: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,<t0=%rdx
sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,%rdx
# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=int64#7
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,<t1=%rax
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER1,%rax
# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=int64#8
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,<t2=%r10
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER2,%r10
# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 - carry
# asm 1: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=int64#12
# asm 2: sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,<t3=%r14
sbb crypto_sign_ed25519_amd64_51_30k_batch_ORDER3,%r14
# Branch-free select: keep t (reduced) when no borrow occurred.
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#3,<r0=int64#4
# asm 2: cmovae <t0=%rdx,<r0=%rcx
cmovae %rdx,%rcx
# qhasm: r1 = t1 if !unsigned<
# asm 1: cmovae <t1=int64#7,<r1=int64#5
# asm 2: cmovae <t1=%rax,<r1=%r8
cmovae %rax,%r8
# qhasm: r2 = t2 if !unsigned<
# asm 1: cmovae <t2=int64#8,<r2=int64#6
# asm 2: cmovae <t2=%r10,<r2=%r9
cmovae %r10,%r9
# qhasm: r3 = t3 if !unsigned<
# asm 1: cmovae <t3=int64#12,<r3=int64#2
# asm 2: cmovae <t3=%r14,<r3=%rsi
cmovae %r14,%rsi
# Store the result limbs to rp.
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
# Epilogue: restore callee-saved registers and the stack frame.
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#1,>caller4=int64#12
# asm 2: movq <caller4_stack=0(%rsp),>caller4=%r14
movq 0(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#2,>caller5=int64#13
# asm 2: movq <caller5_stack=8(%rsp),>caller5=%r15
movq 8(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#3,>caller6=int64#14
# asm 2: movq <caller6_stack=16(%rsp),>caller6=%rbx
movq 16(%rsp),%rbx
# qhasm: leave
add %r11,%rsp
# (standard qhasm "leave" pattern; this function returns no value)
mov %rdi,%rax
mov %rsi,%rdx
ret

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,55 @@
#include "sc25519.h"
/*Arithmetic modulo the group order n = 2^252 + 27742317777372353535851937790883648493
* = 7237005577332262213973186563042994240857116359379907606001950938285454250989
*/
/* Contains order, 2*order, 4*order, 8*order, each represented in 4 consecutive unsigned long long */
/* Limbs are little-endian within each group of four: least-significant
 * 64 bits first (matches the "assuming little-endian" loads below). */
static const unsigned long long order[16] = {0x5812631A5CF5D3EDULL, 0x14DEF9DEA2F79CD6ULL,
                                             0x0000000000000000ULL, 0x1000000000000000ULL,
                                             0xB024C634B9EBA7DAULL, 0x29BDF3BD45EF39ACULL,
                                             0x0000000000000000ULL, 0x2000000000000000ULL,
                                             0x60498C6973D74FB4ULL, 0x537BE77A8BDE7359ULL,
                                             0x0000000000000000ULL, 0x4000000000000000ULL,
                                             0xC09318D2E7AE9F68ULL, 0xA6F7CEF517BCE6B2ULL,
                                             0x0000000000000000ULL, 0x8000000000000000ULL};
/* Constant-time unsigned comparison: returns 1 if a < b, else 0.
 * Works on 32-bit halves so the borrow can be read from bit 63 of a
 * 64-bit difference without any data-dependent branch:
 *   a < b  <=>  hi(a) < hi(b), or hi(a) == hi(b) and lo(a) < lo(b). */
static unsigned long long smaller(unsigned long long a,unsigned long long b)
{
  unsigned long long ahi = a >> 32;
  unsigned long long alo = a & 0xFFFFFFFFULL;
  unsigned long long bhi = b >> 32;
  unsigned long long blo = b & 0xFFFFFFFFULL;
  unsigned long long hi_lt = (ahi - bhi) >> 63;        /* 1 iff ahi < bhi  */
  unsigned long long hi_eq = ((ahi ^ bhi) - 1) >> 63;  /* 1 iff ahi == bhi */
  unsigned long long lo_lt = (alo - blo) >> 63;        /* 1 iff alo < blo  */
  return hi_lt | (hi_eq & lo_lt);
}
/* Load a 32-byte little-endian value into r and reduce it below the group
 * order by conditionally subtracting 8n, 4n, 2n, n in turn (j = 3..0 walks
 * the order[] table from its largest multiple down).  The borrow/mask
 * trick keeps every iteration constant-time.
 * NOTE(review): the unsigned long long* loads type-pun x — assumes a
 * little-endian target that tolerates unaligned access (true on x86-64);
 * strict-aliasing builds may want memcpy here.  TODO confirm build flags. */
void sc25519_from32bytes(sc25519 *r, const unsigned char x[32])
{
  unsigned long long t[4];   /* candidate value r - multiple-of-order */
  unsigned long long b;      /* running borrow of the trial subtraction */
  unsigned long long mask;   /* all-ones iff the subtraction did not borrow */
  int i, j;

  /* assuming little-endian */
  r->v[0] = *(unsigned long long *)x;
  r->v[1] = *(((unsigned long long *)x)+1);
  r->v[2] = *(((unsigned long long *)x)+2);
  r->v[3] = *(((unsigned long long *)x)+3);

  for(j=3;j>=0;j--)
  {
    b=0;
    for(i=0;i<4;i++)
    {
      b += order[4*j+i]; /* no overflow for this particular order */
      t[i] = r->v[i] - b;
      b = smaller(r->v[i],b);   /* borrow out of this limb */
    }
    /* b==0 -> mask = ~0: keep the subtracted value; b==1 -> keep r */
    mask = b - 1;
    for(i=0;i<4;i++)
      r->v[i] ^= mask & (r->v[i] ^ t[i]);
  }
}

View file

@ -0,0 +1,7 @@
#include "sc25519.h"
/* Reduce a 64-byte (512-bit) little-endian value modulo the group order
 * via Barrett reduction.
 * NOTE(review): casts x to unsigned long long* — same little-endian /
 * alignment / aliasing caveat as sc25519_from32bytes; confirm target. */
void sc25519_from64bytes(sc25519 *r, const unsigned char x[64])
{
  /* assuming little-endian representation of unsigned long long */
  sc25519_barrett(r, (unsigned long long *)x);
}

View file

@ -0,0 +1,9 @@
#include "sc25519.h"
/* Widen a 2-limb (128-bit) short scalar into a 4-limb scalar: the low
 * limbs are copied, the high limbs zeroed. */
void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x)
{
  int i;
  for (i = 0; i < 2; i++)
    r->v[i] = x->v[i];
  for (i = 2; i < 4; i++)
    r->v[i] = 0;
}

View file

@ -0,0 +1,10 @@
#include "sc25519.h"
/* Return 1 iff all four limbs of x are zero.  Variable-time on purpose
 * (the _vartime suffix): exits at the first nonzero limb. */
int sc25519_iszero_vartime(const sc25519 *x)
{
  int i;
  for (i = 0; i < 4; i++)
    if (x->v[i] != 0)
      return 0;
  return 1;
}

View file

@ -0,0 +1,131 @@
# crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt(xp, yp) -> 1 if *xp < *yp else 0
#
# 4-limb (256-bit) unsigned comparison: compute x - y with a borrow chain
# and materialize the final carry flag branch-free via cmovc.
# Generated by qhasm; each instruction carries its qhasm source line.
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: int64 ret
# qhasm: input xp
# qhasm: input yp
# qhasm: output ret
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 doof
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# Load the four limbs of x.
# qhasm: t0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#1),>t0=int64#3
# asm 2: movq 0(<xp=%rdi),>t0=%rdx
movq 0(%rdi),%rdx
# qhasm: t1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#1),>t1=int64#4
# asm 2: movq 8(<xp=%rdi),>t1=%rcx
movq 8(%rdi),%rcx
# qhasm: t2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#1),>t2=int64#5
# asm 2: movq 16(<xp=%rdi),>t2=%r8
movq 16(%rdi),%r8
# qhasm: t3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#1),>t3=int64#1
# asm 2: movq 24(<xp=%rdi),>t3=%rdi
movq 24(%rdi),%rdi
# Subtract y limb by limb; the carry flag after the last sbbq is the
# borrow out of the full 256-bit subtraction, i.e. "x < y".
# qhasm: carry? t0 -= *(uint64 *)(yp + 0)
# asm 1: subq 0(<yp=int64#2),<t0=int64#3
# asm 2: subq 0(<yp=%rsi),<t0=%rdx
subq 0(%rsi),%rdx
# qhasm: carry? t1 -= *(uint64 *)(yp + 8) - carry
# asm 1: sbbq 8(<yp=int64#2),<t1=int64#4
# asm 2: sbbq 8(<yp=%rsi),<t1=%rcx
sbbq 8(%rsi),%rcx
# qhasm: carry? t2 -= *(uint64 *)(yp + 16) - carry
# asm 1: sbbq 16(<yp=int64#2),<t2=int64#5
# asm 2: sbbq 16(<yp=%rsi),<t2=%r8
sbbq 16(%rsi),%r8
# qhasm: carry? t3 -= *(uint64 *)(yp + 24) - carry
# asm 1: sbbq 24(<yp=int64#2),<t3=int64#1
# asm 2: sbbq 24(<yp=%rsi),<t3=%rdi
sbbq 24(%rsi),%rdi
# Materialize the borrow as 0/1 without branching.
# qhasm: ret = 0
# asm 1: mov $0,>ret=int64#1
# asm 2: mov $0,>ret=%rdi
mov $0,%rdi
# qhasm: doof = 1
# asm 1: mov $1,>doof=int64#2
# asm 2: mov $1,>doof=%rsi
mov $1,%rsi
# qhasm: ret = doof if carry
# asm 1: cmovc <doof=int64#2,<ret=int64#1
# asm 2: cmovc <doof=%rsi,<ret=%rdi
cmovc %rsi,%rdi
# qhasm: leave
add %r11,%rsp
# qhasm return convention: ret was computed in %rdi, moved to %rax here
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,12 @@
#include "sc25519.h"
#define ull4_mul crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
/* Assembly 4x4-limb multiply producing the full 8-limb (512-bit) product. */
extern void ull4_mul(unsigned long long r[8], const unsigned long long x[4], const unsigned long long y[4]);
/* r = (x * y) mod group order: full 512-bit product, then Barrett reduction. */
void sc25519_mul(sc25519 *r, const sc25519 *x, const sc25519 *y)
{
  unsigned long long t[8];   /* 512-bit intermediate product */
  ull4_mul(t, x->v, y->v);
  sc25519_barrett(r, t);
}

View file

@ -0,0 +1,9 @@
#include "sc25519.h"
void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y)
{
/* XXX: This wants to be faster */
sc25519 t;
sc25519_from_shortsc(&t, y);
sc25519_mul(r, x, &t);
}

View file

@ -0,0 +1,49 @@
#include "sc25519.h"
/* Recode the 256-bit scalar s into a signed sliding-window form r[0..255]:
 * s = sum_i r[i] * 2^i with each digit r[i] in [-m, m] for
 * m = 2^(swindowsize-1) - 1, and long zero runs between nonzero digits.
 * Starts from the plain binary expansion and greedily merges up to 6
 * lookahead bits into the current digit; when a merge would exceed m the
 * digit goes negative and a carry is propagated into the higher bits. */
void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize)
{
  int i,j,k,b,m=(1<<(swindowsize-1))-1, soplen=256;
  unsigned long long sv0 = s->v[0];
  unsigned long long sv1 = s->v[1];
  unsigned long long sv2 = s->v[2];
  unsigned long long sv3 = s->v[3];

  /* first put the binary expansion into r */
  for(i=0;i<64;i++) {
    r[i] = sv0 & 1;
    r[i+64] = sv1 & 1;
    r[i+128] = sv2 & 1;
    r[i+192] = sv3 & 1;
    sv0 >>= 1;
    sv1 >>= 1;
    sv2 >>= 1;
    sv3 >>= 1;
  }

  /* Making it sliding window */
  for (j = 0;j < soplen;++j)
  {
    if (r[j]) {
      /* try to absorb bit j+b into digit j (value r[j+b] << b) */
      for (b = 1;b < soplen - j && b <= 6;++b) {
        if (r[j] + (r[j + b] << b) <= m)
        {
          r[j] += r[j + b] << b; r[j + b] = 0;
        }
        else if (r[j] - (r[j + b] << b) >= -m)
        {
          /* go negative and carry +1 into the next zero position */
          r[j] -= r[j + b] << b;
          for (k = j + b;k < soplen;++k)
          {
            if (!r[k]) {
              r[k] = 1;
              break;
            }
            r[k] = 0;  /* ripple the carry over consecutive ones */
          }
        }
        else if (r[j + b])
          break;
      }
    }
  }
}

View file

@ -0,0 +1,142 @@
# crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored(rp, xp, yp):
# 256-bit limbwise subtraction r = x - y (subq followed by an sbbq borrow
# chain over four 64-bit limbs).  "nored": no reduction mod the group order
# is performed, and the final borrow out is discarded.
# Machine-generated by qhasm -- do not hand-edit the instruction stream;
# regenerate from the qhasm source instead.
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored
_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored:
crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
# qhasm: carry? r0 -= *(uint64 *)(yp + 0)
# asm 1: subq 0(<yp=int64#3),<r0=int64#4
# asm 2: subq 0(<yp=%rdx),<r0=%rcx
subq 0(%rdx),%rcx
# qhasm: carry? r1 -= *(uint64 *)(yp + 8) - carry
# asm 1: sbbq 8(<yp=int64#3),<r1=int64#5
# asm 2: sbbq 8(<yp=%rdx),<r1=%r8
sbbq 8(%rdx),%r8
# qhasm: carry? r2 -= *(uint64 *)(yp + 16) - carry
# asm 1: sbbq 16(<yp=int64#3),<r2=int64#6
# asm 2: sbbq 16(<yp=%rdx),<r2=%r9
sbbq 16(%rdx),%r9
# qhasm: r3 -= *(uint64 *)(yp + 24) - carry
# asm 1: sbbq 24(<yp=int64#3),<r3=int64#2
# asm 2: sbbq 24(<yp=%rdx),<r3=%rsi
sbbq 24(%rdx),%rsi
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,8 @@
#include "sc25519.h"
void sc25519_to32bytes(unsigned char r[32], const sc25519 *x)
{
/* Serialize the four 64-bit limbs as 32 raw bytes (assumes little-endian). */
const unsigned char *src = (const unsigned char *)x->v;
int i;
for(i=0;i<32;i++)
r[i] = src[i];
}

View file

@ -0,0 +1,27 @@
#include "sc25519.h"
/* Recode the 256-bit scalar s into 64 signed radix-16 digits r[0..63],
   with r[0..62] in [-8, 7] and r[63] absorbing the final carry, such that
   sum(r[i] * 16^i) reproduces s. */
void sc25519_window4(signed char r[64], const sc25519 *s)
{
char carry;
int i;
/* Split each 64-bit limb into sixteen unsigned 4-bit nibbles. */
for(i=0;i<16;i++)
r[i] = (s->v[0] >> (4*i)) & 15;
for(i=0;i<16;i++)
r[i+16] = (s->v[1] >> (4*i)) & 15;
for(i=0;i<16;i++)
r[i+32] = (s->v[2] >> (4*i)) & 15;
for(i=0;i<16;i++)
r[i+48] = (s->v[3] >> (4*i)) & 15;
/* Making it signed */
carry = 0;
for(i=0;i<63;i++)
{
r[i] += carry;
r[i+1] += r[i] >> 4; /* push overflow beyond 4 bits into next digit */
r[i] &= 15;
carry = r[i] >> 3; /* 1 iff digit >= 8: replace by digit-16, carry 1 */
r[i] -= carry << 4;
}
r[63] += carry;
}

View file

@ -0,0 +1,57 @@
#include <string.h>
#include "crypto_sign.h"
#include "crypto_hash_sha512.h"
#include "ge25519.h"
/* Sign message m (mlen bytes) with the 64-byte secret key sk
   (32-byte seed followed by the 32-byte public key -- see the
   memmove from sk+32 below).  Produces the signed message
   sm = 32-byte R || 32-byte S || m and sets *smlen = mlen + 64.
   Deterministic: the nonce is H(z, m), no randomness.  Returns 0. */
int crypto_sign(
unsigned char *sm,unsigned long long *smlen,
const unsigned char *m,unsigned long long mlen,
const unsigned char *sk
)
{
unsigned char pk[32];
unsigned char az[64];
unsigned char nonce[64];
unsigned char hram[64];
sc25519 sck, scs, scsk;
ge25519 ger;
memmove(pk,sk + 32,32);
/* pk: 32-byte public key A */
crypto_hash_sha512(az,sk,32);
/* clamp the scalar half: clear low 3 bits, clear top bit, set bit 254 */
az[0] &= 248;
az[31] &= 127;
az[31] |= 64;
/* az: 32-byte scalar a, 32-byte randomizer z */
*smlen = mlen + 64;
memmove(sm + 64,m,mlen);
memmove(sm + 32,az + 32,32);
/* sm: 32-byte uninit, 32-byte z, mlen-byte m */
crypto_hash_sha512(nonce, sm+32, mlen+32);
/* nonce: 64-byte H(z,m) */
sc25519_from64bytes(&sck, nonce);
ge25519_scalarmult_base(&ger, &sck);
ge25519_pack(sm, &ger);
/* sm: 32-byte R, 32-byte z, mlen-byte m */
memmove(sm + 32,pk,32);
/* sm: 32-byte R, 32-byte A, mlen-byte m */
crypto_hash_sha512(hram,sm,mlen + 64);
/* hram: 64-byte H(R,A,m) */
sc25519_from64bytes(&scs, hram);
sc25519_from32bytes(&scsk, az);
sc25519_mul(&scs, &scs, &scsk);
sc25519_add(&scs, &scs, &sck);
/* scs: S = nonce + H(R,A,m)a */
sc25519_to32bytes(sm + 32,&scs);
/* sm: 32-byte R, 32-byte S, mlen-byte m */
return 0;
}

View file

@ -0,0 +1,716 @@
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 r4
# qhasm: int64 r5
# qhasm: int64 r6
# qhasm: int64 r7
# qhasm: int64 c
# qhasm: int64 zero
# qhasm: int64 rax
# qhasm: int64 rdx
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
.globl crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul
_crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
crypto_sign_ed25519_amd64_51_30k_batch_ull4_mul:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# qhasm: yp = yp
# asm 1: mov <yp=int64#3,>yp=int64#4
# asm 2: mov <yp=%rdx,>yp=%rcx
mov %rdx,%rcx
# qhasm: r4 = 0
# asm 1: mov $0,>r4=int64#5
# asm 2: mov $0,>r4=%r8
mov $0,%r8
# qhasm: r5 = 0
# asm 1: mov $0,>r5=int64#6
# asm 2: mov $0,>r5=%r9
mov $0,%r9
# qhasm: r6 = 0
# asm 1: mov $0,>r6=int64#8
# asm 2: mov $0,>r6=%r10
mov $0,%r10
# qhasm: r7 = 0
# asm 1: mov $0,>r7=int64#9
# asm 2: mov $0,>r7=%r11
mov $0,%r11
# qhasm: zero = 0
# asm 1: mov $0,>zero=int64#10
# asm 2: mov $0,>zero=%r12
mov $0,%r12
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: r0 = rax
# asm 1: mov <rax=int64#7,>r0=int64#11
# asm 2: mov <rax=%rax,>r0=%r13
mov %rax,%r13
# qhasm: c = rdx
# asm 1: mov <rdx=int64#3,>c=int64#12
# asm 2: mov <rdx=%rdx,>c=%r14
mov %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: r1 = rax
# asm 1: mov <rax=int64#7,>r1=int64#13
# asm 2: mov <rax=%rax,>r1=%r15
mov %rax,%r15
# qhasm: carry? r1 += c
# asm 1: add <c=int64#12,<r1=int64#13
# asm 2: add <c=%r14,<r1=%r15
add %r14,%r15
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: r2 = rax
# asm 1: mov <rax=int64#7,>r2=int64#14
# asm 2: mov <rax=%rax,>r2=%rbx
mov %rax,%rbx
# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>rax=int64#7
# asm 2: movq 0(<xp=%rsi),>rax=%rax
movq 0(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: r3 = rax
# asm 1: mov <rax=int64#7,>r3=int64#15
# asm 2: mov <rax=%rax,>r3=%rbp
mov %rax,%rbp
# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp
# qhasm: r4 += rdx + carry
# asm 1: adc <rdx=int64#3,<r4=int64#5
# asm 2: adc <rdx=%rdx,<r4=%r8
adc %rdx,%r8
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r1 += rax
# asm 1: add <rax=int64#7,<r1=int64#13
# asm 2: add <rax=%rax,<r1=%r15
add %rax,%r15
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r2 += c
# asm 1: add <c=int64#12,<r2=int64#14
# asm 2: add <c=%r14,<r2=%rbx
add %r14,%rbx
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>rax=int64#7
# asm 2: movq 8(<xp=%rsi),>rax=%rax
movq 8(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8
# qhasm: r5 += rdx + carry
# asm 1: adc <rdx=int64#3,<r5=int64#6
# asm 2: adc <rdx=%rdx,<r5=%r9
adc %rdx,%r9
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r2 += rax
# asm 1: add <rax=int64#7,<r2=int64#14
# asm 2: add <rax=%rax,<r2=%rbx
add %rax,%rbx
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r3 += c
# asm 1: add <c=int64#12,<r3=int64#15
# asm 2: add <c=%r14,<r3=%rbp
add %r14,%rbp
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>rax=int64#7
# asm 2: movq 16(<xp=%rsi),>rax=%rax
movq 16(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9
# qhasm: r6 += rdx + carry
# asm 1: adc <rdx=int64#3,<r6=int64#8
# asm 2: adc <rdx=%rdx,<r6=%r10
adc %rdx,%r10
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0)
# asm 1: mulq 0(<yp=int64#4)
# asm 2: mulq 0(<yp=%rcx)
mulq 0(%rcx)
# qhasm: carry? r3 += rax
# asm 1: add <rax=int64#7,<r3=int64#15
# asm 2: add <rax=%rax,<r3=%rbp
add %rax,%rbp
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8)
# asm 1: mulq 8(<yp=int64#4)
# asm 2: mulq 8(<yp=%rcx)
mulq 8(%rcx)
# qhasm: carry? r4 += rax
# asm 1: add <rax=int64#7,<r4=int64#5
# asm 2: add <rax=%rax,<r4=%r8
add %rax,%r8
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r4 += c
# asm 1: add <c=int64#12,<r4=int64#5
# asm 2: add <c=%r14,<r4=%r8
add %r14,%r8
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq 16(<yp=int64#4)
# asm 2: mulq 16(<yp=%rcx)
mulq 16(%rcx)
# qhasm: carry? r5 += rax
# asm 1: add <rax=int64#7,<r5=int64#6
# asm 2: add <rax=%rax,<r5=%r9
add %rax,%r9
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r5 += c
# asm 1: add <c=int64#12,<r5=int64#6
# asm 2: add <c=%r14,<r5=%r9
add %r14,%r9
# qhasm: c = 0
# asm 1: mov $0,>c=int64#12
# asm 2: mov $0,>c=%r14
mov $0,%r14
# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>rax=int64#7
# asm 2: movq 24(<xp=%rsi),>rax=%rax
movq 24(%rsi),%rax
# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq 24(<yp=int64#4)
# asm 2: mulq 24(<yp=%rcx)
mulq 24(%rcx)
# qhasm: carry? r6 += rax
# asm 1: add <rax=int64#7,<r6=int64#8
# asm 2: add <rax=%rax,<r6=%r10
add %rax,%r10
# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx
# qhasm: carry? r6 += c
# asm 1: add <c=int64#12,<r6=int64#8
# asm 2: add <c=%r14,<r6=%r10
add %r14,%r10
# qhasm: r7 += rdx + carry
# asm 1: adc <rdx=int64#3,<r7=int64#9
# asm 2: adc <rdx=%rdx,<r7=%r11
adc %rdx,%r11
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#11,0(<rp=int64#1)
# asm 2: movq <r0=%r13,0(<rp=%rdi)
movq %r13,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#13,8(<rp=int64#1)
# asm 2: movq <r1=%r15,8(<rp=%rdi)
movq %r15,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#14,16(<rp=int64#1)
# asm 2: movq <r2=%rbx,16(<rp=%rdi)
movq %rbx,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#15,24(<rp=int64#1)
# asm 2: movq <r3=%rbp,24(<rp=%rdi)
movq %rbp,24(%rdi)
# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq <r4=int64#5,32(<rp=int64#1)
# asm 2: movq <r4=%r8,32(<rp=%rdi)
movq %r8,32(%rdi)
# qhasm: *(uint64 *)(rp + 40) = r5
# asm 1: movq <r5=int64#6,40(<rp=int64#1)
# asm 2: movq <r5=%r9,40(<rp=%rdi)
movq %r9,40(%rdi)
# qhasm: *(uint64 *)(rp + 48) = r6
# asm 1: movq <r6=int64#8,48(<rp=int64#1)
# asm 2: movq <r6=%r10,48(<rp=%rdi)
movq %r10,48(%rdi)
# qhasm: *(uint64 *)(rp + 56) = r7
# asm 1: movq <r7=int64#9,56(<rp=int64#1)
# asm 2: movq <r7=%r11,56(<rp=%rdi)
movq %r11,56(%rdi)
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,4 @@
/* SUPERCOP crypto_sign API size parameters for this ed25519 implementation. */
#define CRYPTO_SECRETKEYBYTES 64 /* 32-byte secret seed + 32-byte public key */
#define CRYPTO_PUBLICKEYBYTES 32 /* compressed public key */
#define CRYPTO_BYTES 64 /* signature overhead: 32-byte R + 32-byte S */
#define CRYPTO_DETERMINISTIC 1 /* signing uses no per-signature randomness */

View file

@ -0,0 +1 @@
amd64

View file

@ -0,0 +1,94 @@
#include "crypto_sign.h"
#include "crypto_verify_32.h"
#include "crypto_hash_sha512.h"
#include "randombytes.h"
#include "ge25519.h"
#include "hram.h"
#define MAXBATCH 64
/* Batch signature verification: verifies num signed messages sm[i]
   (lengths smlen[i]) against public keys pk[i], writing recovered
   messages into m[i] / mlen[i].  Batches of up to MAXBATCH are checked
   with one multi-scalar multiplication using random weights r[i]; if the
   combined check fails (or any unpack/length check fails), the batch
   falls back to verifying each signature individually.  Returns 0 if all
   signatures verified, nonzero otherwise.  mlen[i] is preset to -1
   (== ULLONG_MAX, since mlen is unsigned) as a "not recovered" sentinel. */
int crypto_sign_open_batch(
unsigned char* const m[],unsigned long long mlen[],
unsigned char* const sm[],const unsigned long long smlen[],
unsigned char* const pk[],
unsigned long long num
)
{
int ret = 0;
unsigned long long i, j;
shortsc25519 r[MAXBATCH];
sc25519 scalars[2*MAXBATCH+1];
ge25519 points[2*MAXBATCH+1];
unsigned char hram[crypto_hash_sha512_BYTES];
unsigned long long batchsize;
for (i = 0;i < num;++i) mlen[i] = -1;
/* Batching only pays off for 3+ signatures; the remainder is done singly. */
while (num >= 3) {
batchsize = num;
if (batchsize > MAXBATCH) batchsize = MAXBATCH;
for (i = 0;i < batchsize;++i)
if (smlen[i] < 64) goto fallback;
randombytes((unsigned char*)r,sizeof(shortsc25519) * batchsize);
/* Computing scalars[0] = ((r1s1 + r2s2 + ...)) */
for(i=0;i<batchsize;i++)
{
sc25519_from32bytes(&scalars[i], sm[i]+32);
sc25519_mul_shortsc(&scalars[i], &scalars[i], &r[i]);
}
for(i=1;i<batchsize;i++)
sc25519_add(&scalars[0], &scalars[0], &scalars[i]);
/* Computing scalars[1] ... scalars[batchsize] as r[i]*H(R[i],A[i],m[i]) */
for(i=0;i<batchsize;i++)
{
get_hram(hram, sm[i], pk[i], m[i], smlen[i]);
sc25519_from64bytes(&scalars[i+1],hram);
sc25519_mul_shortsc(&scalars[i+1],&scalars[i+1],&r[i]);
}
/* Setting scalars[batchsize+1] ... scalars[2*batchsize] to r[i] */
for(i=0;i<batchsize;i++)
sc25519_from_shortsc(&scalars[batchsize+i+1],&r[i]);
/* Computing points: base point, then negated public keys, then negated R's. */
points[0] = ge25519_base;
for(i=0;i<batchsize;i++)
if (ge25519_unpackneg_vartime(&points[i+1], pk[i])) goto fallback;
for(i=0;i<batchsize;i++)
if (ge25519_unpackneg_vartime(&points[batchsize+i+1], sm[i])) goto fallback;
ge25519_multi_scalarmult_vartime(points, points, scalars, 2*batchsize+1);
/* The random linear combination must sum to the neutral element. */
if (ge25519_isneutral_vartime(points)) {
for(i=0;i<batchsize;i++)
{
for(j=0;j<smlen[i]-64;j++)
m[i][j] = sm[i][j + 64];
mlen[i] = smlen[i]-64;
}
} else {
fallback:
/* Combined check failed: re-verify each signature on its own. */
for (i = 0;i < batchsize;++i)
ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]);
}
m += batchsize;
mlen += batchsize;
sm += batchsize;
smlen += batchsize;
pk += batchsize;
num -= batchsize;
}
for (i = 0;i < num;++i)
ret |= crypto_sign_open(m[i], &mlen[i], sm[i], smlen[i], pk[i]);
return ret;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,39 @@
.data
# Shared numeric constants for the ed25519 amd64-64 field/scalar arithmetic.
.globl crypto_sign_ed25519_amd64_64_121666
.globl crypto_sign_ed25519_amd64_64_MU0
.globl crypto_sign_ed25519_amd64_64_MU1
.globl crypto_sign_ed25519_amd64_64_MU2
.globl crypto_sign_ed25519_amd64_64_MU3
.globl crypto_sign_ed25519_amd64_64_MU4
.globl crypto_sign_ed25519_amd64_64_ORDER0
.globl crypto_sign_ed25519_amd64_64_ORDER1
.globl crypto_sign_ed25519_amd64_64_ORDER2
.globl crypto_sign_ed25519_amd64_64_ORDER3
.globl crypto_sign_ed25519_amd64_64_EC2D0
.globl crypto_sign_ed25519_amd64_64_EC2D1
.globl crypto_sign_ed25519_amd64_64_EC2D2
.globl crypto_sign_ed25519_amd64_64_EC2D3
.globl crypto_sign_ed25519_amd64_64_38
.p2align 4
# (486662+2)/4 = 121666, the curve25519 ladder constant.
crypto_sign_ed25519_amd64_64_121666: .quad 121666
# MU0..MU4: presumably floor(2^512 / ORDER), the Barrett reduction
# constant for the scalar field -- confirm against sc25519_barrett.
crypto_sign_ed25519_amd64_64_MU0: .quad 0xED9CE5A30A2C131B
crypto_sign_ed25519_amd64_64_MU1: .quad 0x2106215D086329A7
crypto_sign_ed25519_amd64_64_MU2: .quad 0xFFFFFFFFFFFFFFEB
crypto_sign_ed25519_amd64_64_MU3: .quad 0xFFFFFFFFFFFFFFFF
crypto_sign_ed25519_amd64_64_MU4: .quad 0x000000000000000F
# ORDER0..3: the ed25519 group order l = 2^252 + 27742317777372353535851937790883648493,
# stored as little-endian 64-bit limbs.
crypto_sign_ed25519_amd64_64_ORDER0: .quad 0x5812631A5CF5D3ED
crypto_sign_ed25519_amd64_64_ORDER1: .quad 0x14DEF9DEA2F79CD6
crypto_sign_ed25519_amd64_64_ORDER2: .quad 0x0000000000000000
crypto_sign_ed25519_amd64_64_ORDER3: .quad 0x1000000000000000
# EC2D0..3: presumably 2*d, d the twisted-Edwards curve constant -- verify.
crypto_sign_ed25519_amd64_64_EC2D0: .quad 0xEBD69B9426B2F146
crypto_sign_ed25519_amd64_64_EC2D1: .quad 0x00E0149A8283B156
crypto_sign_ed25519_amd64_64_EC2D2: .quad 0x198E80F2EEF3D130
crypto_sign_ed25519_amd64_64_EC2D3: .quad 0xA406D9DC56DFFCE7
# 38 = 2*19: used to fold carries, since 2^256 = 38 (mod 2^255-19).
crypto_sign_ed25519_amd64_64_38: .quad 38

View file

@ -0,0 +1 @@
#include <sodium/crypto_hash_sha512.h>

View file

@ -0,0 +1,2 @@
#include <stdint.h>
#define crypto_int32 int32_t

View file

@ -0,0 +1,2 @@
#include <stdint.h>
#define crypto_int64 int64_t

View file

@ -0,0 +1,8 @@
/* Rename the generic SUPERCOP crypto_sign entry points to implementation-
   specific symbols, presumably so multiple embedded ed25519 backends can
   be linked into the same binary without clashing. */
#define crypto_sign ed25519_amd64_64_sign
#define crypto_sign_keypair ed25519_amd64_64_keygen
#define crypto_sign_seckey ed25519_amd64_64_seckey
#define crypto_sign_seckey_expand ed25519_amd64_64_seckey_expand
#define crypto_sign_pubkey ed25519_amd64_64_pubkey
#define crypto_sign_open ed25519_amd64_64_open
#include "ed25519.h"

View file

@ -0,0 +1,2 @@
#include <stdint.h>
#define crypto_uint32 uint32_t

View file

@ -0,0 +1,2 @@
#include <stdint.h>
#define crypto_uint64 uint64_t

View file

@ -0,0 +1,4 @@
#include <sodium/utils.h>
/* Constant-time 32-byte comparison, shimmed onto libsodium.
   NOTE(review): sodium_memcmp() returns 0 when equal, so this macro yields
   1 on equal / 0 on mismatch -- the OPPOSITE of SUPERCOP's crypto_verify_32
   convention (0 on equal, -1 on mismatch).  Verify that every caller in the
   imported amd64 code expects this inverted convention. */
#define crypto_verify_32(a,b) \
(!sodium_memcmp((a), (b), 32))

View file

@ -0,0 +1,4 @@
/* Key-generation entry points for the amd64-64 backend (these are the
   renamed crypto_sign_* functions -- see crypto_sign.h in this directory). */
/* Generate a secret key into sk (implementation not shown here). */
int ed25519_amd64_64_seckey(unsigned char *sk);
/* Expand a seed into a full secret key -- presumably hash+clamp; confirm in the .c file. */
int ed25519_amd64_64_seckey_expand(unsigned char *sk,const unsigned char *seed);
/* Derive the 32-byte public key pk from secret key sk. */
int ed25519_amd64_64_pubkey(unsigned char *pk,const unsigned char *sk);
/* Generate a keypair: public key in pk, secret key in sk. */
int ed25519_amd64_64_keygen(unsigned char *pk,unsigned char *sk);

View file

@ -0,0 +1,64 @@
#ifndef FE25519_H
#define FE25519_H
/* Field elements mod 2^255-19 in radix 2^64 (four unsigned 64-bit limbs).
   The short fe25519_* names are aliased onto namespaced symbols so that
   several ed25519 implementations can coexist in one binary. */
#define fe25519 crypto_sign_ed25519_amd64_64_fe25519
#define fe25519_freeze crypto_sign_ed25519_amd64_64_fe25519_freeze
#define fe25519_unpack crypto_sign_ed25519_amd64_64_fe25519_unpack
#define fe25519_pack crypto_sign_ed25519_amd64_64_fe25519_pack
#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_64_fe25519_iszero_vartime
#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_64_fe25519_iseq_vartime
#define fe25519_cmov crypto_sign_ed25519_amd64_64_fe25519_cmov
#define fe25519_setint crypto_sign_ed25519_amd64_64_fe25519_setint
#define fe25519_neg crypto_sign_ed25519_amd64_64_fe25519_neg
#define fe25519_getparity crypto_sign_ed25519_amd64_64_fe25519_getparity
#define fe25519_add crypto_sign_ed25519_amd64_64_fe25519_add
#define fe25519_sub crypto_sign_ed25519_amd64_64_fe25519_sub
#define fe25519_mul crypto_sign_ed25519_amd64_64_fe25519_mul
#define fe25519_mul121666 crypto_sign_ed25519_amd64_64_fe25519_mul121666
#define fe25519_square crypto_sign_ed25519_amd64_64_fe25519_square
#define fe25519_invert crypto_sign_ed25519_amd64_64_fe25519_invert
#define fe25519_pow2523 crypto_sign_ed25519_amd64_64_fe25519_pow2523
/* NOTE(review): fe25519_cswap and fe25519_pow are declared below but are
   missing from the rename list above, so they resolve to unprefixed
   symbols -- confirm they are unused, or add the missing #defines. */
typedef struct
{
unsigned long long v[4];
}
fe25519;
void fe25519_freeze(fe25519 *r);
void fe25519_unpack(fe25519 *r, const unsigned char x[32]);
void fe25519_pack(unsigned char r[32], const fe25519 *x);
/* Constant-time conditional move/swap selected by b (0 or 1). */
void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b);
void fe25519_cswap(fe25519 *r, fe25519 *x, unsigned char b);
void fe25519_setint(fe25519 *r, unsigned int v);
void fe25519_neg(fe25519 *r, const fe25519 *x);
unsigned char fe25519_getparity(const fe25519 *x);
/* _vartime: running time may depend on the values -- not constant time. */
int fe25519_iszero_vartime(const fe25519 *x);
int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y);
void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y);
void fe25519_mul121666(fe25519 *r, const fe25519 *x);
void fe25519_square(fe25519 *r, const fe25519 *x);
void fe25519_pow(fe25519 *r, const fe25519 *x, const unsigned char *e);
void fe25519_invert(fe25519 *r, const fe25519 *x);
/* Name suggests x^((p-5)/8) = x^(2^252-3), used for square roots -- confirm. */
void fe25519_pow2523(fe25519 *r, const fe25519 *x);
#endif

View file

@ -0,0 +1,189 @@
# crypto_sign_ed25519_amd64_64_fe25519_add(rp, xp, yp):
# r = x + y on four 64-bit limbs.  A carry out of bit 256 is folded back
# by adding 38 (2^256 = 38 mod 2^255-19); a second conditional add of 38
# handles the rare carry produced by the first fold.  Result may be a
# non-canonical representative (no full freeze here).
# Machine-generated by qhasm -- do not hand-edit the instruction stream;
# regenerate from the qhasm source instead.
# qhasm: int64 rp
# qhasm: int64 xp
# qhasm: int64 yp
# qhasm: input rp
# qhasm: input xp
# qhasm: input yp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 addt0
# qhasm: int64 addt1
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_add
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_add
.globl crypto_sign_ed25519_amd64_64_fe25519_add
_crypto_sign_ed25519_amd64_64_fe25519_add:
crypto_sign_ed25519_amd64_64_fe25519_add:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
# qhasm: r0 = *(uint64 *)(xp + 0)
# asm 1: movq 0(<xp=int64#2),>r0=int64#4
# asm 2: movq 0(<xp=%rsi),>r0=%rcx
movq 0(%rsi),%rcx
# qhasm: r1 = *(uint64 *)(xp + 8)
# asm 1: movq 8(<xp=int64#2),>r1=int64#5
# asm 2: movq 8(<xp=%rsi),>r1=%r8
movq 8(%rsi),%r8
# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq 16(<xp=int64#2),>r2=int64#6
# asm 2: movq 16(<xp=%rsi),>r2=%r9
movq 16(%rsi),%r9
# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq 24(<xp=int64#2),>r3=int64#2
# asm 2: movq 24(<xp=%rsi),>r3=%rsi
movq 24(%rsi),%rsi
# qhasm: carry? r0 += *(uint64 *)(yp + 0)
# asm 1: addq 0(<yp=int64#3),<r0=int64#4
# asm 2: addq 0(<yp=%rdx),<r0=%rcx
addq 0(%rdx),%rcx
# qhasm: carry? r1 += *(uint64 *)(yp + 8) + carry
# asm 1: adcq 8(<yp=int64#3),<r1=int64#5
# asm 2: adcq 8(<yp=%rdx),<r1=%r8
adcq 8(%rdx),%r8
# qhasm: carry? r2 += *(uint64 *)(yp + 16) + carry
# asm 1: adcq 16(<yp=int64#3),<r2=int64#6
# asm 2: adcq 16(<yp=%rdx),<r2=%r9
adcq 16(%rdx),%r9
# qhasm: carry? r3 += *(uint64 *)(yp + 24) + carry
# asm 1: adcq 24(<yp=int64#3),<r3=int64#2
# asm 2: adcq 24(<yp=%rdx),<r3=%rsi
adcq 24(%rdx),%rsi
# qhasm: addt0 = 0
# asm 1: mov $0,>addt0=int64#3
# asm 2: mov $0,>addt0=%rdx
mov $0,%rdx
# qhasm: addt1 = 38
# asm 1: mov $38,>addt1=int64#7
# asm 2: mov $38,>addt1=%rax
mov $38,%rax
# qhasm: addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#3,<addt1=int64#7
# asm 2: cmovae <addt0=%rdx,<addt1=%rax
cmovae %rdx,%rax
# qhasm: carry? r0 += addt1
# asm 1: add <addt1=int64#7,<r0=int64#4
# asm 2: add <addt1=%rax,<r0=%rcx
add %rax,%rcx
# qhasm: carry? r1 += addt0 + carry
# asm 1: adc <addt0=int64#3,<r1=int64#5
# asm 2: adc <addt0=%rdx,<r1=%r8
adc %rdx,%r8
# qhasm: carry? r2 += addt0 + carry
# asm 1: adc <addt0=int64#3,<r2=int64#6
# asm 2: adc <addt0=%rdx,<r2=%r9
adc %rdx,%r9
# qhasm: carry? r3 += addt0 + carry
# asm 1: adc <addt0=int64#3,<r3=int64#2
# asm 2: adc <addt0=%rdx,<r3=%rsi
adc %rdx,%rsi
# qhasm: addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#7,<addt0=int64#3
# asm 2: cmovc <addt1=%rax,<addt0=%rdx
cmovc %rax,%rdx
# qhasm: r0 += addt0
# asm 1: add <addt0=int64#3,<r0=int64#4
# asm 2: add <addt0=%rdx,<r0=%rcx
add %rdx,%rcx
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#4,0(<rp=int64#1)
# asm 2: movq <r0=%rcx,0(<rp=%rdi)
movq %rcx,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#5,8(<rp=int64#1)
# asm 2: movq <r1=%r8,8(<rp=%rdi)
movq %r8,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#6,16(<rp=int64#1)
# asm 2: movq <r2=%r9,16(<rp=%rdi)
movq %r9,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#2,24(<rp=int64#1)
# asm 2: movq <r3=%rsi,24(<rp=%rdi)
movq %rsi,24(%rdi)
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,322 @@
# fe25519_freeze: fully reduce the 4x64-bit field element at rp (%rdi),
# in place, modulo p = 2^255 - 19.  Output is the unique representative
# in [0, p).
#
# Method: two conditional subtractions of p.  Each pass computes
#   t = r + 19 + 2^255   (== r + (2^256 - p), i.e. r - p mod 2^256);
# the addition carries out of bit 255 exactly when r >= p, and in that
# case r is replaced by t.  Two passes suffice because any 256-bit input
# is less than 3p.  (Generated by qhasm; do not hand-edit instructions.)
# qhasm: int64 rp
# qhasm: input rp
# qhasm: int64 r0
# qhasm: int64 r1
# qhasm: int64 r2
# qhasm: int64 r3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 two63
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack
# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_freeze
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_freeze
.globl crypto_sign_ed25519_amd64_64_fe25519_freeze
_crypto_sign_ed25519_amd64_64_fe25519_freeze:
crypto_sign_ed25519_amd64_64_fe25519_freeze:
# Prologue (qhasm boilerplate): align %rsp to 32 bytes, reserve 64 bytes,
# keep the adjustment in %r11 so the epilogue can undo it.
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
# Spill callee-saved registers (and the %r11 stack adjustment) to the
# local frame; only %r12 is actually used below, but the generated
# prologue/epilogue saves the full qhasm caller set.
# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)
# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)
# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)
# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)
# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)
# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)
# Load the four 64-bit limbs r0..r3 from rp[0..24].
# qhasm: r0 = *(uint64 *) (rp + 0)
# asm 1: movq 0(<rp=int64#1),>r0=int64#2
# asm 2: movq 0(<rp=%rdi),>r0=%rsi
movq 0(%rdi),%rsi
# qhasm: r1 = *(uint64 *) (rp + 8)
# asm 1: movq 8(<rp=int64#1),>r1=int64#3
# asm 2: movq 8(<rp=%rdi),>r1=%rdx
movq 8(%rdi),%rdx
# qhasm: r2 = *(uint64 *) (rp + 16)
# asm 1: movq 16(<rp=int64#1),>r2=int64#4
# asm 2: movq 16(<rp=%rdi),>r2=%rcx
movq 16(%rdi),%rcx
# qhasm: r3 = *(uint64 *) (rp + 24)
# asm 1: movq 24(<rp=int64#1),>r3=int64#5
# asm 2: movq 24(<rp=%rdi),>r3=%r8
movq 24(%rdi),%r8
# First conditional subtraction: t = r + 19 + 2^255; keep t iff it carried
# (i.e. iff r >= p).
# qhasm: t0 = r0
# asm 1: mov <r0=int64#2,>t0=int64#6
# asm 2: mov <r0=%rsi,>t0=%r9
mov %rsi,%r9
# qhasm: t1 = r1
# asm 1: mov <r1=int64#3,>t1=int64#7
# asm 2: mov <r1=%rdx,>t1=%rax
mov %rdx,%rax
# qhasm: t2 = r2
# asm 1: mov <r2=int64#4,>t2=int64#8
# asm 2: mov <r2=%rcx,>t2=%r10
mov %rcx,%r10
# qhasm: t3 = r3
# asm 1: mov <r3=int64#5,>t3=int64#9
# asm 2: mov <r3=%r8,>t3=%r11
mov %r8,%r11
# two63 = 2^63; adding it to the top limb adds 2^255 overall.
# qhasm: two63 = 1
# asm 1: mov $1,>two63=int64#10
# asm 2: mov $1,>two63=%r12
mov $1,%r12
# qhasm: two63 <<= 63
# asm 1: shl $63,<two63=int64#10
# asm 2: shl $63,<two63=%r12
shl $63,%r12
# qhasm: carry? t0 += 19
# asm 1: add $19,<t0=int64#6
# asm 2: add $19,<t0=%r9
add $19,%r9
# qhasm: carry? t1 += 0 + carry
# asm 1: adc $0,<t1=int64#7
# asm 2: adc $0,<t1=%rax
adc $0,%rax
# qhasm: carry? t2 += 0 + carry
# asm 1: adc $0,<t2=int64#8
# asm 2: adc $0,<t2=%r10
adc $0,%r10
# qhasm: carry? t3 += two63 + carry
# asm 1: adc <two63=int64#10,<t3=int64#9
# asm 2: adc <two63=%r12,<t3=%r11
adc %r12,%r11
# Branch-free select: r = t only when the add carried (constant time).
# qhasm: r0 = t0 if carry
# asm 1: cmovc <t0=int64#6,<r0=int64#2
# asm 2: cmovc <t0=%r9,<r0=%rsi
cmovc %r9,%rsi
# qhasm: r1 = t1 if carry
# asm 1: cmovc <t1=int64#7,<r1=int64#3
# asm 2: cmovc <t1=%rax,<r1=%rdx
cmovc %rax,%rdx
# qhasm: r2 = t2 if carry
# asm 1: cmovc <t2=int64#8,<r2=int64#4
# asm 2: cmovc <t2=%r10,<r2=%rcx
cmovc %r10,%rcx
# qhasm: r3 = t3 if carry
# asm 1: cmovc <t3=int64#9,<r3=int64#5
# asm 2: cmovc <t3=%r11,<r3=%r8
cmovc %r11,%r8
# Second conditional subtraction (identical to the first); after this
# r is guaranteed to be < p.
# qhasm: t0 = r0
# asm 1: mov <r0=int64#2,>t0=int64#6
# asm 2: mov <r0=%rsi,>t0=%r9
mov %rsi,%r9
# qhasm: t1 = r1
# asm 1: mov <r1=int64#3,>t1=int64#7
# asm 2: mov <r1=%rdx,>t1=%rax
mov %rdx,%rax
# qhasm: t2 = r2
# asm 1: mov <r2=int64#4,>t2=int64#8
# asm 2: mov <r2=%rcx,>t2=%r10
mov %rcx,%r10
# qhasm: t3 = r3
# asm 1: mov <r3=int64#5,>t3=int64#9
# asm 2: mov <r3=%r8,>t3=%r11
mov %r8,%r11
# qhasm: carry? t0 += 19
# asm 1: add $19,<t0=int64#6
# asm 2: add $19,<t0=%r9
add $19,%r9
# qhasm: carry? t1 += 0 + carry
# asm 1: adc $0,<t1=int64#7
# asm 2: adc $0,<t1=%rax
adc $0,%rax
# qhasm: carry? t2 += 0 + carry
# asm 1: adc $0,<t2=int64#8
# asm 2: adc $0,<t2=%r10
adc $0,%r10
# qhasm: carry? t3 += two63 + carry
# asm 1: adc <two63=int64#10,<t3=int64#9
# asm 2: adc <two63=%r12,<t3=%r11
adc %r12,%r11
# qhasm: r0 = t0 if carry
# asm 1: cmovc <t0=int64#6,<r0=int64#2
# asm 2: cmovc <t0=%r9,<r0=%rsi
cmovc %r9,%rsi
# qhasm: r1 = t1 if carry
# asm 1: cmovc <t1=int64#7,<r1=int64#3
# asm 2: cmovc <t1=%rax,<r1=%rdx
cmovc %rax,%rdx
# qhasm: r2 = t2 if carry
# asm 1: cmovc <t2=int64#8,<r2=int64#4
# asm 2: cmovc <t2=%r10,<r2=%rcx
cmovc %r10,%rcx
# qhasm: r3 = t3 if carry
# asm 1: cmovc <t3=int64#9,<r3=int64#5
# asm 2: cmovc <t3=%r11,<r3=%r8
cmovc %r11,%r8
# Store the fully reduced limbs back to rp[0..24].
# qhasm: *(uint64 *)(rp + 0) = r0
# asm 1: movq <r0=int64#2,0(<rp=int64#1)
# asm 2: movq <r0=%rsi,0(<rp=%rdi)
movq %rsi,0(%rdi)
# qhasm: *(uint64 *)(rp + 8) = r1
# asm 1: movq <r1=int64#3,8(<rp=int64#1)
# asm 2: movq <r1=%rdx,8(<rp=%rdi)
movq %rdx,8(%rdi)
# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq <r2=int64#4,16(<rp=int64#1)
# asm 2: movq <r2=%rcx,16(<rp=%rdi)
movq %rcx,16(%rdi)
# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq <r3=int64#5,24(<rp=int64#1)
# asm 2: movq <r3=%r8,24(<rp=%rdi)
movq %r8,24(%rdi)
# Epilogue: restore callee-saved registers (caller1 reload also restores
# the %r11 stack adjustment used by "add %r11,%rsp" below).
# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11
# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12
# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13
# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14
# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15
# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx
# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp
# qhasm "leave": undo stack adjustment; the trailing moves into
# %rax/%rdx are the generator's standard return sequence.
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret

View file

@ -0,0 +1,8 @@
#include "fe25519.h"
/* Return the parity (least-significant bit) of x, computed on the
   fully reduced representative so the answer is well defined. */
unsigned char fe25519_getparity(const fe25519 *x)
{
  fe25519 r;
  r = *x;
  fe25519_freeze(&r);
  return (unsigned char)(r.v[0] & 1);
}

View file

@ -0,0 +1,60 @@
#include "fe25519.h"
/*
 * fe25519_invert: r = x^-1 in GF(2^255-19), computed as x^(p-2) =
 * x^(2^255 - 21) via Fermat's little theorem, using the standard fixed
 * addition chain (254 squarings, 11 multiplications).  Constant
 * sequence of operations regardless of input.  Each comment gives the
 * exponent held in the named variable at that point.
 * Note: inverting zero yields zero (every step maps 0 to 0).
 */
void fe25519_invert(fe25519 *r, const fe25519 *x)
{
fe25519 z2;
fe25519 z9;
fe25519 z11;
fe25519 z2_5_0;
fe25519 z2_10_0;
fe25519 z2_20_0;
fe25519 z2_50_0;
fe25519 z2_100_0;
fe25519 t;
int i;
/* 2 */ fe25519_square(&z2,x);
/* 4 */ fe25519_square(&t,&z2);
/* 8 */ fe25519_square(&t,&t);
/* 9 */ fe25519_mul(&z9,&t,x);
/* 11 */ fe25519_mul(&z11,&z9,&z2);
/* 22 */ fe25519_square(&t,&z11);
/* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9);
/* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0);
/* 2^10 - 2^5 */ for (i = 1;i < 5;i++) { fe25519_square(&t,&t); }
/* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0);
/* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0);
/* 2^20 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); }
/* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0);
/* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0);
/* 2^40 - 2^20 */ for (i = 1;i < 20;i++) { fe25519_square(&t,&t); }
/* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0);
/* 2^41 - 2^1 */ fe25519_square(&t,&t);
/* 2^50 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); }
/* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0);
/* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0);
/* 2^100 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); }
/* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0);
/* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0);
/* 2^200 - 2^100 */ for (i = 1;i < 100;i++) { fe25519_square(&t,&t); }
/* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0);
/* 2^201 - 2^1 */ fe25519_square(&t,&t);
/* 2^250 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); }
/* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0);
/* 2^251 - 2^1 */ fe25519_square(&t,&t);
/* 2^252 - 2^2 */ fe25519_square(&t,&t);
/* 2^253 - 2^3 */ fe25519_square(&t,&t);
/* 2^254 - 2^4 */ fe25519_square(&t,&t);
/* 2^255 - 2^5 */ fe25519_square(&t,&t);
/* 2^255 - 21 */ fe25519_mul(r,&t,&z11);
}

View file

@ -0,0 +1,14 @@
#include "fe25519.h"
int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y)
{
fe25519 t1 = *x;
fe25519 t2 = *y;
fe25519_freeze(&t1);
fe25519_freeze(&t2);
if(t1.v[0] != t2.v[0]) return 0;
if(t1.v[1] != t2.v[1]) return 0;
if(t1.v[2] != t2.v[2]) return 0;
if(t1.v[3] != t2.v[3]) return 0;
return 1;
}

Some files were not shown because too many files have changed in this diff Show more