diff --git a/GNUmakefile.in b/GNUmakefile.in
index 80ebed7..acd8694 100644
--- a/GNUmakefile.in
+++ b/GNUmakefile.in
@@ -1,19 +1,19 @@
 CC= @CC@
 CSTD= @CSTD@
-CFLAGS= $(CSTD) @CFLAGS@ @CPPFLAGS@ -DED25519_@ED25519IMPL@ @MYDEFS@
-ASFLAGS=
-LDFLAGS= @NOPIE@ @LDFLAGS@
+CFLAGS= $(CSTD) @CFLAGS@ @CPPFLAGS@ -DED25519_@ED25519IMPL@ @PIE@ @MYDEFS@
+ASFLAGS= @PIE@
+LDFLAGS= @LDFLAGS@
 MV= mv
 
 ED25519_DEFS= -DED25519_ref10 -DED25519_amd64_51_30k -DED25519_amd64_64_24k -DED25519_donna
 ED25519_ref10= $(patsubst @SRCDIR@/%.c,%.c.o,$(wildcard @SRCDIR@/ed25519/ref10/*.c))
 ED25519_amd64_51_30k= \
 	$(patsubst @SRCDIR@/%.c,%.c.o,$(wildcard @SRCDIR@/ed25519/amd64-51-30k/*.c)) \
-	$(patsubst @SRCDIR@/%.s,%.s.o,$(wildcard @SRCDIR@/ed25519/amd64-51-30k/*.s))
+	$(patsubst @SRCDIR@/%.S,%.S.o,$(wildcard @SRCDIR@/ed25519/amd64-51-30k/*.S))
 ED25519_amd64_64_24k= \
 	$(patsubst @SRCDIR@/%.c,%.c.o,$(wildcard @SRCDIR@/ed25519/amd64-64-24k/*.c)) \
-	$(patsubst @SRCDIR@/%.s,%.s.o,$(wildcard @SRCDIR@/ed25519/amd64-64-24k/*.s))
+	$(patsubst @SRCDIR@/%.S,%.S.o,$(wildcard @SRCDIR@/ed25519/amd64-64-24k/*.S))
 ED25519_donna=
 
 ED25519_OBJ= $(ED25519_@ED25519IMPL@)
 
@@ -128,10 +128,22 @@ depend:
 
 VPATH=@SRCDIR@
 
+%.c.o: CFLAGS += \
+	-D'CRYPTO_NAMESPACETOP=crypto_sign_ed25519_@ED25519IMPL@' \
+	-D'_CRYPTO_NAMESPACETOP=_crypto_sign_ed25519_@ED25519IMPL@' \
+	-D'CRYPTO_NAMESPACE(name)=crypto_sign_ed25519_@ED25519IMPL@_\#\#name' \
+	-D'_CRYPTO_NAMESPACE(name)=_crypto_sign_ed25519_@ED25519IMPL@_\#\#name' \
+
+%.S.o: ASFLAGS += \
+	-D'CRYPTO_NAMESPACETOP=crypto_sign_ed25519_@ED25519IMPL@' \
+	-D'_CRYPTO_NAMESPACETOP=_crypto_sign_ed25519_@ED25519IMPL@' \
+	-D'CRYPTO_NAMESPACE(name)=crypto_sign_ed25519_@ED25519IMPL@_\#\#name' \
+	-D'_CRYPTO_NAMESPACE(name)=_crypto_sign_ed25519_@ED25519IMPL@_\#\#name' \
+
 %.c.o: %.c
 	$(CC) $(CFLAGS) -c -o $@.tmp $< && $(MV) $@.tmp $@
 
-%.s.o: %.s
+%.S.o: %.S
 	$(CC) $(ASFLAGS) -c -o $@.tmp $< && $(MV) $@.tmp $@
 
 # DO NOT DELETE THIS LINE
@@ -404,13 +416,15 @@ main.c.o: filters_common.inc.h ifilter_bitsum.h
 test_base16.c.o: types.h base16.h
 test_base32.c.o: types.h base32.h
 test_base64.c.o: types.h base64.h
-test_ed25519.c.o: types.h base16.h ed25519/ed25519.h ed25519/ref10/ed25519.h
-test_ed25519.c.o: ed25519/ref10/ge.h ed25519/ref10/fe.h
-test_ed25519.c.o: ed25519/ref10/crypto_int32.h ed25519/amd64-51-30k/ed25519.h
+test_ed25519.c.o: types.h base16.h ed25519/ed25519.h
+test_ed25519.c.o: ed25519/ed25519_impl_pre.h ed25519/ref10/crypto_sign.h
+test_ed25519.c.o: ed25519/amd64-51-30k/ed25519.h ed25519/ref10/ge.h
+test_ed25519.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
+test_ed25519.c.o: ed25519/amd64-51-30k/crypto_sign.h
 test_ed25519.c.o: ed25519/amd64-51-30k/ge25519.h
 test_ed25519.c.o: ed25519/amd64-51-30k/fe25519.h
 test_ed25519.c.o: ed25519/amd64-51-30k/sc25519.h
-test_ed25519.c.o: ed25519/amd64-64-24k/ed25519.h
+test_ed25519.c.o: ed25519/amd64-64-24k/crypto_sign.h
 test_ed25519.c.o: ed25519/amd64-64-24k/ge25519.h
 test_ed25519.c.o: ed25519/ed25519-donna/ed25519-donna.h
 test_ed25519.c.o: ed25519/ed25519-donna/ed25519-donna-portable.h
@@ -430,13 +444,15 @@ test_ed25519.c.o: ed25519/ed25519-donna/ed25519-donna-32bit-sse2.h
 test_ed25519.c.o: ed25519/ed25519-donna/ed25519-donna-64bit-sse2.h
 test_ed25519.c.o: ed25519/ed25519-donna/ed25519-donna-impl-sse2.h
 test_ed25519.c.o: ed25519/ed25519-donna/ed25519-donna-impl-base.h testutil.h
+test_ed25519.c.o: ed25519/ed25519_impl_post.h
 vec.c.o: vec.h
 worker.c.o: types.h likely.h vec.h base32.h keccak.h ed25519/ed25519.h
-worker.c.o: ed25519/ref10/ed25519.h ed25519/ref10/ge.h ed25519/ref10/fe.h
-worker.c.o: ed25519/ref10/crypto_int32.h ed25519/amd64-51-30k/ed25519.h
-worker.c.o: ed25519/amd64-51-30k/ge25519.h ed25519/amd64-51-30k/fe25519.h
-worker.c.o: ed25519/amd64-51-30k/sc25519.h ed25519/amd64-64-24k/ed25519.h
-worker.c.o: ed25519/amd64-64-24k/ge25519.h
+worker.c.o: ed25519/ed25519_impl_pre.h ed25519/ref10/crypto_sign.h
+worker.c.o: ed25519/amd64-51-30k/ed25519.h ed25519/ref10/ge.h
+worker.c.o: ed25519/ref10/fe.h ed25519/ref10/crypto_int32.h
+worker.c.o: ed25519/amd64-51-30k/crypto_sign.h ed25519/amd64-51-30k/ge25519.h
+worker.c.o: ed25519/amd64-51-30k/fe25519.h ed25519/amd64-51-30k/sc25519.h
+worker.c.o: ed25519/amd64-64-24k/crypto_sign.h ed25519/amd64-64-24k/ge25519.h
 worker.c.o: ed25519/ed25519-donna/ed25519-donna.h
 worker.c.o: ed25519/ed25519-donna/ed25519-donna-portable.h
 worker.c.o: ed25519/ed25519-donna/ed25519-donna-portable-identify.h
@@ -458,4 +474,5 @@ worker.c.o: ed25519/ed25519-donna/ed25519-donna-impl-base.h ioutil.h common.h
 worker.c.o: yaml.h worker.h filters.h filters_inc.inc.h filters_worker.inc.h
 worker.c.o: filters_common.inc.h worker_slow.inc.h worker_fast.inc.h
 worker.c.o: worker_fast_pass.inc.h worker_batch.inc.h worker_batch_pass.inc.h
+worker.c.o: ed25519/ed25519_impl_post.h
 yaml.c.o: types.h yaml.h ioutil.h base32.h base64.h common.h
diff --git a/configure.ac b/configure.ac
index 35a488e..68152c7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -27,31 +27,18 @@ then
 	CFLAGS="$oldcflags"
 fi
 
-nopie=""
+pie=""
 oldcflags="$CFLAGS"
-CFLAGS="-nopie -Werror"
-AC_MSG_CHECKING([whether CC supports -nopie])
+CFLAGS="-fPIE -Werror"
+AC_MSG_CHECKING([whether CC supports -fPIE])
 AC_LINK_IFELSE([AC_LANG_PROGRAM([])],
 	[AC_MSG_RESULT([yes])]
-	[nopie="-nopie"],
+	[pie="-fPIE"],
 	[AC_MSG_RESULT([no])]
 )
 CFLAGS="$oldcflags"
 
-if test "x$nopie" = "x"
-then
-	oldcflags="$CFLAGS"
-	CFLAGS="-no-pie -Werror"
-	AC_MSG_CHECKING([whether CC supports -no-pie])
-	AC_LINK_IFELSE([AC_LANG_PROGRAM([])],
-		[AC_MSG_RESULT([yes])]
-		[nopie="-no-pie"],
-		[AC_MSG_RESULT([no])]
-	)
-	CFLAGS="$oldcflags"
-fi
-
 MYDEFS=""
 MAINLIB=""
@@ -357,7 +344,7 @@ AC_SUBST(CSTD,["$cstd"])
 AC_SUBST(ED25519IMPL,["$ed25519impl"])
 AC_SUBST(MYDEFS,["$MYDEFS"])
 AC_SUBST(MAINLIB,["$MAINLIB"])
-AC_SUBST(NOPIE,["$nopie"])
+AC_SUBST(PIE,["$pie"])
 AC_SUBST(SRCDIR,["$srcdir"])
 AC_CONFIG_FILES([GNUmakefile])
 AC_OUTPUT
diff --git a/ed25519/amd64-51-30k/choose_t.s b/ed25519/amd64-51-30k/choose_t.S
similarity index 96%
rename from ed25519/amd64-51-30k/choose_t.s
rename to ed25519/amd64-51-30k/choose_t.S
index 6a64c08..ef6eb43 100644
--- a/ed25519/amd64-51-30k/choose_t.s
+++ b/ed25519/amd64-51-30k/choose_t.S
@@ -105,13 +105,13 @@
 
 # qhasm: stack64 caller7_stack
 
-# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_choose_t
+# qhasm: enter CRYPTO_NAMESPACE(batch_choose_t)
 .text
 .p2align 5
-.globl _crypto_sign_ed25519_amd64_51_30k_batch_choose_t
-.globl crypto_sign_ed25519_amd64_51_30k_batch_choose_t
-_crypto_sign_ed25519_amd64_51_30k_batch_choose_t:
-crypto_sign_ed25519_amd64_51_30k_batch_choose_t:
+.globl _CRYPTO_NAMESPACE(batch_choose_t)
+.globl CRYPTO_NAMESPACE(batch_choose_t)
+_CRYPTO_NAMESPACE(batch_choose_t):
+CRYPTO_NAMESPACE(batch_choose_t):
 mov %rsp,%r11
 and $31,%r11
 add $64,%r11
@@ -1677,30 +1677,30 @@ movq 952(%rcx,%rdi),%rdi
# asm 2: cmove tt0=int64#1
-# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>tt0=%rdi
-movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdi
+# qhasm: tt0 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0)
+# asm 1: movq CRYPTO_NAMESPACE(batch_2P0),>tt0=int64#1
+# asm 2: 
movq CRYPTO_NAMESPACE(batch_2P0),>tt0=%rdi +movq CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdi -# qhasm: tt1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=int64#4 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=%rcx -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx +# qhasm: tt1 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt1=int64#4 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt1=%rcx +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx -# qhasm: tt2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=int64#5 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=%r8 -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8 +# qhasm: tt2 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt2=int64#5 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt2=%r8 +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8 -# qhasm: tt3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=int64#10 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=%r12 -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12 +# qhasm: tt3 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt3=int64#10 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt3=%r12 +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r12 -# qhasm: tt4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=int64#11 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=%r13 -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r13 +# qhasm: tt4 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>tt4=int64#11 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>tt4=%r13 +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r13 # qhasm: tt0 -= tt2d0 # asm 1: sub -#define fe25519 crypto_sign_ed25519_amd64_51_30k_batch_fe25519 -#define fe25519_freeze crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze -#define fe25519_unpack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_unpack -#define fe25519_pack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pack -#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iszero_vartime -#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iseq_vartime -#define fe25519_cmov crypto_sign_ed25519_amd64_51_30k_batch_fe25519_cmov -#define fe25519_setint crypto_sign_ed25519_amd64_51_30k_batch_fe25519_setint -#define fe25519_neg crypto_sign_ed25519_amd64_51_30k_batch_fe25519_neg -#define fe25519_getparity crypto_sign_ed25519_amd64_51_30k_batch_fe25519_getparity -#define fe25519_add crypto_sign_ed25519_amd64_51_30k_batch_fe25519_add -#define fe25519_sub crypto_sign_ed25519_amd64_51_30k_batch_fe25519_sub -#define fe25519_mul crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul -#define fe25519_mul121666 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul121666 -#define fe25519_square crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square -#define fe25519_nsquare crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare -#define fe25519_invert crypto_sign_ed25519_amd64_51_30k_batch_fe25519_invert -#define fe25519_pow2523 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pow2523 +#define fe25519 
CRYPTO_NAMESPACE(batch_fe25519) +#define fe25519_freeze CRYPTO_NAMESPACE(batch_fe25519_freeze) +#define fe25519_unpack CRYPTO_NAMESPACE(batch_fe25519_unpack) +#define fe25519_pack CRYPTO_NAMESPACE(batch_fe25519_pack) +#define fe25519_iszero_vartime CRYPTO_NAMESPACE(batch_fe25519_iszero_vartime) +#define fe25519_iseq_vartime CRYPTO_NAMESPACE(batch_fe25519_iseq_vartime) +#define fe25519_cmov CRYPTO_NAMESPACE(batch_fe25519_cmov) +#define fe25519_setint CRYPTO_NAMESPACE(batch_fe25519_setint) +#define fe25519_neg CRYPTO_NAMESPACE(batch_fe25519_neg) +#define fe25519_getparity CRYPTO_NAMESPACE(batch_fe25519_getparity) +#define fe25519_add CRYPTO_NAMESPACE(batch_fe25519_add) +#define fe25519_sub CRYPTO_NAMESPACE(batch_fe25519_sub) +#define fe25519_mul CRYPTO_NAMESPACE(batch_fe25519_mul) +#define fe25519_mul121666 CRYPTO_NAMESPACE(batch_fe25519_mul121666) +#define fe25519_square CRYPTO_NAMESPACE(batch_fe25519_square) +#define fe25519_nsquare CRYPTO_NAMESPACE(batch_fe25519_nsquare) +#define fe25519_invert CRYPTO_NAMESPACE(batch_fe25519_invert) +#define fe25519_batchinvert CRYPTO_NAMESPACE(batch_fe25519_batchinvert) +#define fe25519_pow2523 CRYPTO_NAMESPACE(batch_fe25519_pow2523) typedef struct { diff --git a/ed25519/amd64-51-30k/fe25519_freeze.s b/ed25519/amd64-51-30k/fe25519_freeze.S similarity index 94% rename from ed25519/amd64-51-30k/fe25519_freeze.s rename to ed25519/amd64-51-30k/fe25519_freeze.S index 5cd0b1d..0435525 100644 --- a/ed25519/amd64-51-30k/fe25519_freeze.s +++ b/ed25519/amd64-51-30k/fe25519_freeze.S @@ -63,13 +63,13 @@ # qhasm: stack64 caller7_stack -# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze +# qhasm: enter CRYPTO_NAMESPACE(batch_fe25519_freeze) .text .p2align 5 -.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze -.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze -_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze: -crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze: +.globl _CRYPTO_NAMESPACE(batch_fe25519_freeze) +.globl CRYPTO_NAMESPACE(batch_fe25519_freeze) +_CRYPTO_NAMESPACE(batch_fe25519_freeze): +CRYPTO_NAMESPACE(batch_fe25519_freeze): mov %rsp,%r11 and $31,%r11 add $64,%r11 @@ -135,10 +135,10 @@ movq 24(%rdi),%r8 # asm 2: movq 32(r4=%r9 movq 32(%rdi),%r9 -# qhasm: two51minus1 = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=%rax -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rax +# qhasm: two51minus1 = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>two51minus1=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>two51minus1=%rax +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rax # qhasm: two51minus19 = two51minus1 # asm 1: mov two51minus19=int64#8 diff --git a/ed25519/amd64-51-30k/fe25519_mul.s b/ed25519/amd64-51-30k/fe25519_mul.S similarity index 97% rename from ed25519/amd64-51-30k/fe25519_mul.s rename to ed25519/amd64-51-30k/fe25519_mul.S index 9d6c537..cbcdbe9 100644 --- a/ed25519/amd64-51-30k/fe25519_mul.s +++ b/ed25519/amd64-51-30k/fe25519_mul.S @@ -97,13 +97,13 @@ # qhasm: stack64 mulx419_stack -# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul +# qhasm: enter CRYPTO_NAMESPACE(batch_fe25519_mul) .text .p2align 5 -.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul -.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul 
-_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul: -crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul: +.globl _CRYPTO_NAMESPACE(batch_fe25519_mul) +.globl CRYPTO_NAMESPACE(batch_fe25519_mul) +_CRYPTO_NAMESPACE(batch_fe25519_mul): +CRYPTO_NAMESPACE(batch_fe25519_mul): mov %rsp,%r11 and $31,%r11 add $96,%r11 @@ -689,10 +689,10 @@ add %rax,%r14 # asm 2: adc mulredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: mulr01 = (mulr01.r0) << 13 # asm 1: shld $13,squareredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: squarer01 = (squarer01.r0) << 13 # asm 1: shld $13,squareredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: squarer01 = (squarer01.r0) << 13 # asm 1: shld $13,b0=%r11 mov %rdx,%r11 -# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,x0=int64#10 @@ -354,10 +354,10 @@ sub %r12,%rdx # asm 2: mov b1=%r12 mov %r8,%r12 -# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x1=int64#11 @@ -379,10 +379,10 @@ sub %r13,%r8 # asm 2: mov b2=%r13 mov %r9,%r13 -# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x2=int64#12 @@ -404,10 +404,10 @@ sub %r14,%r9 # asm 2: mov b3=%r14 mov %rax,%r14 -# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x3=int64#13 @@ -429,10 +429,10 @@ sub %r15,%rax # asm 2: mov b4=%r15 mov %r10,%r15 -# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x4=int64#14 @@ -529,10 +529,10 @@ movq 72(%rcx),%r10 # asm 2: mov t20=%r11 mov %rdx,%r11 -# qhasm: t10 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,rx0=int64#10 @@ -554,10 +554,10 @@ sub %r12,%rdx # asm 2: mov t21=%r12 mov %r8,%r12 -# qhasm: t11 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx1=int64#11 @@ -579,10 +579,10 @@ sub %r13,%r8 # asm 2: mov t22=%r13 mov %r9,%r13 -# qhasm: t12 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx2=int64#12 @@ -604,10 +604,10 @@ sub %r14,%r9 # asm 2: mov 
t23=%r14 mov %rax,%r14 -# qhasm: t13 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx3=int64#13 @@ -629,10 +629,10 @@ sub %r15,%rax # asm 2: mov t24=%r15 mov %r10,%r15 -# qhasm: t14 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx4=int64#14 @@ -1234,10 +1234,10 @@ add %rax,%r14 # asm 2: adc mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.a0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rx0) << 13 # asm 1: shld $13,ry4=%r15 mov %r12,%r15 -# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.c0) << 13 # asm 1: shld $13,mulx319_stack=96(%rsp) movq %rax,96(%rsp) -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: c0 = mulrax # asm 1: mov c0=int64#5 @@ -3117,8 +3117,8 @@ imulq $19,%rdx,%rax # asm 2: movq mulx419_stack=104(%rsp) movq %rax,104(%rsp) -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? c0 += mulrax # asm 1: add mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? 
c0 += mulrax # asm 1: add mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: c1 = mulrax # asm 1: mov c1=int64#8 @@ -3171,8 +3171,8 @@ mov %rdx,%r11 # asm 2: movq mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: c2 = mulrax # asm 1: mov c2=int64#10 @@ -3189,8 +3189,8 @@ mov %rdx,%r13 # asm 2: movq mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: c3 = mulrax # asm 1: mov c3=int64#12 @@ -3207,8 +3207,8 @@ mov %rdx,%r15 # asm 2: movq mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: c4 = mulrax # asm 1: mov c4=int64#14 @@ -3225,8 +3225,8 @@ mov %rdx,%rbp # asm 2: movq mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? c1 += mulrax # asm 1: add mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? c2 += mulrax # asm 1: add mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: carry? c3 += mulrax # asm 1: add mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? c4 += mulrax # asm 1: add mulrax=%rax imulq $19,%rdx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? 
c0 += mulrax # asm 1: add mulrax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? c2 += mulrax # asm 1: add mulrax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? c3 += mulrax # asm 1: add mulrax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: carry? c4 += mulrax # asm 1: add mulrax=%rax imulq $19,%rdx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? c0 += mulrax # asm 1: add mulrax=%rax imulq $19,%rdx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? c1 += mulrax # asm 1: add mulrax=%rax movq 80(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? c3 += mulrax # asm 1: add mulrax=%rax movq 80(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? c4 += mulrax # asm 1: add mulrax=%rax movq 96(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? c1 += mulrax # asm 1: add mulrax=%rax movq 96(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? c2 += mulrax # asm 1: add mulrax=%rax movq 88(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? 
c4 += mulrax # asm 1: add mulrax=%rax movq 104(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: carry? c1 += mulrax # asm 1: add mulrax=%rax movq 104(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? c2 += mulrax # asm 1: add mulrax=%rax movq 104(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? c3 += mulrax # asm 1: add mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.c0) << 13 # asm 1: shld $13,mulredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: mulr01 = (mulr01.rt0) << 13 # asm 1: shld $13,rz4=%r13 mov %r10,%r13 -# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,squareredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: squarer01 = (squarer01.a0) << 13 # asm 1: shld $13,squareredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: squarer01 = (squarer01.b0) << 13 # asm 1: shld $13,squareredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx +movq 
CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: squarer01 = (squarer01.c0) << 13 # asm 1: shld $13,c4_stack=168(%rsp) movq %r11,168(%rsp) -# qhasm: d0 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx +# qhasm: d0 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P0) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P0),>d0=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P0),>d0=%rdx +movq CRYPTO_NAMESPACE(batch_2P0)(%rip),%rdx -# qhasm: d1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=int64#4 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=%rcx -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx +# qhasm: d1 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d1=int64#4 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d1=%rcx +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rcx -# qhasm: d2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=int64#5 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=%r8 -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8 +# qhasm: d2 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d2=int64#5 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d2=%r8 +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r8 -# qhasm: d3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=int64#6 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=%r9 -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9 +# qhasm: d3 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d3=int64#6 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d3=%r9 +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%r9 -# qhasm: d4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 -# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=%rax -movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax +# qhasm: d4 = *(uint64 *)&CRYPTO_NAMESPACE(batch_2P1234) +# asm 1: movq CRYPTO_NAMESPACE(batch_2P1234),>d4=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(batch_2P1234),>d4=%rax +movq CRYPTO_NAMESPACE(batch_2P1234)(%rip),%rax # qhasm: e0 = d0 # asm 1: mov e0=int64#8 @@ -2263,30 +2263,30 @@ movq %r13,64(%rdi) # asm 2: movq squareredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: squareredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>squareredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: squarer01 = (squarer01.rx0) << 13 # asm 1: shld $13,b4=%r14 mov %rax,%r14 -# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: 
movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.a0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.e0) << 13 # asm 1: shld $13,h4=%r14 mov %r11,%r14 -# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: mulr01 = (mulr01.c0) << 13 # asm 1: shld $13,g4=%rbp mov %r12,%rbp -# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rx0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.ry0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rz0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rt0) << 13 # asm 1: shld $13,b4=%r15 mov %r10,%r15 -# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq 
crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.a0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.e0) << 13 # asm 1: shld $13,h4=%r15 mov %r12,%r15 -# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.c0) << 13 # asm 1: shld $13,g4=%rbp mov %rsi,%rbp -# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rx0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.ry0) << 13 # asm 1: shld $13,mulredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: mulr01 = (mulr01.rz0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rx0) << 13 # asm 1: 
shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.ry0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rz0) << 13 # asm 1: shld $13,mulredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: mulr01 = (mulr01.rt0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.x0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.y0) << 13 # asm 1: shld $13,ysubx4=%r14 mov %r11,%r14 -# qhasm: ysubx0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,x0=int64#13 @@ -2403,10 +2403,10 @@ add %rax,%r13 # asm 2: adc mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rz0) << 13 # asm 1: shld $13,mulredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: mulr01 = (mulr01.t0) << 13 # asm 1: shld $13,mulx319_stack=96(%rsp) movq 
%rax,96(%rsp) -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: t2d0 = mulrax # asm 1: mov t2d0=int64#2 @@ -3411,8 +3411,8 @@ imulq $19,%rdx,%rax # asm 2: movq mulx419_stack=104(%rsp) movq %rax,104(%rsp) -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? t2d0 += mulrax # asm 1: add mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? t2d0 += mulrax # asm 1: add mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: t2d1 = mulrax # asm 1: mov t2d1=int64#5 @@ -3465,8 +3465,8 @@ mov %rdx,%r9 # asm 2: movq mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: t2d2 = mulrax # asm 1: mov t2d2=int64#8 @@ -3483,8 +3483,8 @@ mov %rdx,%r11 # asm 2: movq mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: t2d3 = mulrax # asm 1: mov t2d3=int64#10 @@ -3501,8 +3501,8 @@ mov %rdx,%r13 # asm 2: movq mulrax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: t2d4 = mulrax # asm 1: mov t2d4=int64#12 @@ -3519,8 +3519,8 @@ mov %rdx,%r15 # asm 2: movq mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? t2d1 += mulrax # asm 1: add mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? 
t2d2 += mulrax # asm 1: add mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: carry? t2d3 += mulrax # asm 1: add mulrax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? t2d4 += mulrax # asm 1: add mulrax=%rax imulq $19,%rdx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? t2d0 += mulrax # asm 1: add mulrax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? t2d2 += mulrax # asm 1: add mulrax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? t2d3 += mulrax # asm 1: add mulrax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: carry? t2d4 += mulrax # asm 1: add mulrax=%rax imulq $19,%rdx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? t2d0 += mulrax # asm 1: add mulrax=%rax imulq $19,%rdx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? t2d1 += mulrax # asm 1: add mulrax=%rax movq 80(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? 
t2d3 += mulrax # asm 1: add mulrax=%rax movq 80(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D1) +mulq CRYPTO_NAMESPACE(batch_EC2D1)(%rip) # qhasm: carry? t2d4 += mulrax # asm 1: add mulrax=%rax movq 96(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? t2d1 += mulrax # asm 1: add mulrax=%rax movq 96(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? t2d2 += mulrax # asm 1: add mulrax=%rax movq 88(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D0) +mulq CRYPTO_NAMESPACE(batch_EC2D0)(%rip) # qhasm: carry? t2d4 += mulrax # asm 1: add mulrax=%rax movq 104(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D2) +mulq CRYPTO_NAMESPACE(batch_EC2D2)(%rip) # qhasm: carry? t2d1 += mulrax # asm 1: add mulrax=%rax movq 104(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D3) +mulq CRYPTO_NAMESPACE(batch_EC2D3)(%rip) # qhasm: carry? t2d2 += mulrax # asm 1: add mulrax=%rax movq 104(%rsp),%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(batch_EC2D4) +mulq CRYPTO_NAMESPACE(batch_EC2D4)(%rip) # qhasm: carry? 
t2d3 += mulrax # asm 1: add mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.t2d0) << 13 # asm 1: shld $13,b4=%r15 mov %r10,%r15 -# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.a0) << 13 # asm 1: shld $13,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.rx0) << 13 # asm 1: shld $13,ry4=%r15 mov %r12,%r15 -# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulredmask=int64#3 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#3 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rdx +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rdx # qhasm: mulr01 = (mulr01.c0) << 13 # asm 1: shld $13,mulredmask=int64#2 -# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi -movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi +# qhasm: mulredmask = *(uint64 *) &CRYPTO_NAMESPACE(batch_REDMASK51) +# asm 1: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=int64#2 +# asm 2: movq CRYPTO_NAMESPACE(batch_REDMASK51),>mulredmask=%rsi +movq CRYPTO_NAMESPACE(batch_REDMASK51)(%rip),%rsi # qhasm: mulr01 = (mulr01.rt0) << 13 # asm 1: shld $13,rz4=%r13 mov %r10,%r13 -# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 -# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,t3=%r14 mov %rsi,%r14 -# qhasm: carry? 
t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 -# asm 1: sub crypto_sign_ed25519_amd64_64_ORDER0,rax=%rax movq 24(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 -mulq crypto_sign_ed25519_amd64_64_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3) +mulq CRYPTO_NAMESPACE(batch_MU3)(%rip) # qhasm: q23 = rax # asm 1: mov q23=int64#10 @@ -202,8 +202,8 @@ mov %rdx,%r13 # asm 2: movq 24(rax=%rax movq 24(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 -mulq crypto_sign_ed25519_amd64_64_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4) +mulq CRYPTO_NAMESPACE(batch_MU4)(%rip) # qhasm: q24 = rax # asm 1: mov q24=int64#12 @@ -225,8 +225,8 @@ adc %rdx,%r8 # asm 2: movq 32(rax=%rax movq 32(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 -mulq crypto_sign_ed25519_amd64_64_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2) +mulq CRYPTO_NAMESPACE(batch_MU2)(%rip) # qhasm: carry? q23 += rax # asm 1: add rax=%rax movq 32(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 -mulq crypto_sign_ed25519_amd64_64_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3) +mulq CRYPTO_NAMESPACE(batch_MU3)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 32(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 -mulq crypto_sign_ed25519_amd64_64_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4) +mulq CRYPTO_NAMESPACE(batch_MU4)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1 -mulq crypto_sign_ed25519_amd64_64_MU1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU1) +mulq CRYPTO_NAMESPACE(batch_MU1)(%rip) # qhasm: carry? q23 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 -mulq crypto_sign_ed25519_amd64_64_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2) +mulq CRYPTO_NAMESPACE(batch_MU2)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 -mulq crypto_sign_ed25519_amd64_64_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3) +mulq CRYPTO_NAMESPACE(batch_MU3)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 -mulq crypto_sign_ed25519_amd64_64_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4) +mulq CRYPTO_NAMESPACE(batch_MU4)(%rip) # qhasm: carry? q31 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU0 -mulq crypto_sign_ed25519_amd64_64_MU0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU0) +mulq CRYPTO_NAMESPACE(batch_MU0)(%rip) # qhasm: carry? 
q23 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1 -mulq crypto_sign_ed25519_amd64_64_MU1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU1) +mulq CRYPTO_NAMESPACE(batch_MU1)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 -mulq crypto_sign_ed25519_amd64_64_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2) +mulq CRYPTO_NAMESPACE(batch_MU2)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 -mulq crypto_sign_ed25519_amd64_64_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3) +mulq CRYPTO_NAMESPACE(batch_MU3)(%rip) # qhasm: carry? q31 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 -mulq crypto_sign_ed25519_amd64_64_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4) +mulq CRYPTO_NAMESPACE(batch_MU4)(%rip) # qhasm: carry? q32 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU0 -mulq crypto_sign_ed25519_amd64_64_MU0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU0) +mulq CRYPTO_NAMESPACE(batch_MU0)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1 -mulq crypto_sign_ed25519_amd64_64_MU1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU1) +mulq CRYPTO_NAMESPACE(batch_MU1)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2 -mulq crypto_sign_ed25519_amd64_64_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU2) +mulq CRYPTO_NAMESPACE(batch_MU2)(%rip) # qhasm: carry? q31 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3 -mulq crypto_sign_ed25519_amd64_64_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU3) +mulq CRYPTO_NAMESPACE(batch_MU3)(%rip) # qhasm: carry? q32 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4 -mulq crypto_sign_ed25519_amd64_64_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_MU4) +mulq CRYPTO_NAMESPACE(batch_MU4)(%rip) # qhasm: carry? 
q33 += rax # asm 1: add rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 -mulq crypto_sign_ed25519_amd64_64_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0) +mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip) # qhasm: r20 = rax # asm 1: mov r20=int64#5 @@ -761,8 +761,8 @@ mov %rdx,%r9 # asm 2: movq rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 -mulq crypto_sign_ed25519_amd64_64_ORDER1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1) +mulq CRYPTO_NAMESPACE(batch_ORDER1)(%rip) # qhasm: r21 = rax # asm 1: mov r21=int64#8 @@ -789,8 +789,8 @@ adc %rdx,%r9 # asm 2: movq rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 -mulq crypto_sign_ed25519_amd64_64_ORDER2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER2) +mulq CRYPTO_NAMESPACE(batch_ORDER2)(%rip) # qhasm: r22 = rax # asm 1: mov r22=int64#9 @@ -817,8 +817,8 @@ adc %rdx,%r9 # asm 2: movq rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 -mulq crypto_sign_ed25519_amd64_64_ORDER3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER3) +mulq CRYPTO_NAMESPACE(batch_ORDER3)(%rip) # qhasm: free rdx @@ -837,8 +837,8 @@ add %r9,%r12 # asm 2: movq rax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 -mulq crypto_sign_ed25519_amd64_64_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0) +mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip) # qhasm: carry? r21 += rax # asm 1: add rax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 -mulq crypto_sign_ed25519_amd64_64_ORDER1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1) +mulq CRYPTO_NAMESPACE(batch_ORDER1)(%rip) # qhasm: carry? r22 += rax # asm 1: add rax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 -mulq crypto_sign_ed25519_amd64_64_ORDER2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER2) +mulq CRYPTO_NAMESPACE(batch_ORDER2)(%rip) # qhasm: free rdx @@ -913,8 +913,8 @@ add %rcx,%r12 # asm 2: movq rax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 -mulq crypto_sign_ed25519_amd64_64_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0) +mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip) # qhasm: carry? r22 += rax # asm 1: add rax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 -mulq crypto_sign_ed25519_amd64_64_ORDER1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER1) +mulq CRYPTO_NAMESPACE(batch_ORDER1)(%rip) # qhasm: free rdx @@ -956,8 +956,8 @@ add %rcx,%r12 # asm 2: movq rax=%rax movq 80(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 -mulq crypto_sign_ed25519_amd64_64_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(batch_ORDER0) +mulq CRYPTO_NAMESPACE(batch_ORDER0)(%rip) # qhasm: free rdx @@ -1026,25 +1026,25 @@ sbb %r12,%rsi # asm 2: mov t3=%r11 mov %rsi,%r11 -# qhasm: carry? 
t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 -# asm 1: sub crypto_sign_ed25519_amd64_64_ORDER0,t3=%r11 mov %rsi,%r11 -# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0 -# asm 1: sub crypto_sign_ed25519_amd64_64_ORDER0, -#define fe25519 crypto_sign_ed25519_amd64_64_fe25519 -#define fe25519_freeze crypto_sign_ed25519_amd64_64_fe25519_freeze -#define fe25519_unpack crypto_sign_ed25519_amd64_64_fe25519_unpack -#define fe25519_pack crypto_sign_ed25519_amd64_64_fe25519_pack -#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_64_fe25519_iszero_vartime -#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_64_fe25519_iseq_vartime -#define fe25519_cmov crypto_sign_ed25519_amd64_64_fe25519_cmov -#define fe25519_setint crypto_sign_ed25519_amd64_64_fe25519_setint -#define fe25519_neg crypto_sign_ed25519_amd64_64_fe25519_neg -#define fe25519_getparity crypto_sign_ed25519_amd64_64_fe25519_getparity -#define fe25519_add crypto_sign_ed25519_amd64_64_fe25519_add -#define fe25519_sub crypto_sign_ed25519_amd64_64_fe25519_sub -#define fe25519_mul crypto_sign_ed25519_amd64_64_fe25519_mul -#define fe25519_mul121666 crypto_sign_ed25519_amd64_64_fe25519_mul121666 -#define fe25519_square crypto_sign_ed25519_amd64_64_fe25519_square -#define fe25519_invert crypto_sign_ed25519_amd64_64_fe25519_invert -#define fe25519_batchinvert crypto_sign_ed25519_amd64_64_fe25519_batchinvert -#define fe25519_pow2523 crypto_sign_ed25519_amd64_64_fe25519_pow2523 +#define fe25519 CRYPTO_NAMESPACE(fe25519) +#define fe25519_freeze CRYPTO_NAMESPACE(fe25519_freeze) +#define fe25519_unpack CRYPTO_NAMESPACE(fe25519_unpack) +#define fe25519_pack CRYPTO_NAMESPACE(fe25519_pack) +#define fe25519_iszero_vartime CRYPTO_NAMESPACE(fe25519_iszero_vartime) +#define fe25519_iseq_vartime CRYPTO_NAMESPACE(fe25519_iseq_vartime) +#define fe25519_cmov CRYPTO_NAMESPACE(fe25519_cmov) +#define fe25519_setint CRYPTO_NAMESPACE(fe25519_setint) +#define fe25519_neg CRYPTO_NAMESPACE(fe25519_neg) +#define fe25519_getparity CRYPTO_NAMESPACE(fe25519_getparity) +#define fe25519_add CRYPTO_NAMESPACE(fe25519_add) +#define fe25519_sub CRYPTO_NAMESPACE(fe25519_sub) +#define fe25519_mul CRYPTO_NAMESPACE(fe25519_mul) +#define fe25519_mul121666 CRYPTO_NAMESPACE(fe25519_mul121666) +#define fe25519_square CRYPTO_NAMESPACE(fe25519_square) +#define fe25519_invert CRYPTO_NAMESPACE(fe25519_invert) +#define fe25519_batchinvert CRYPTO_NAMESPACE(fe25519_batchinvert) +#define fe25519_pow2523 CRYPTO_NAMESPACE(fe25519_pow2523) typedef struct { diff --git a/ed25519/amd64-64-24k/fe25519_add.s b/ed25519/amd64-64-24k/fe25519_add.S similarity index 94% rename from ed25519/amd64-64-24k/fe25519_add.s rename to ed25519/amd64-64-24k/fe25519_add.S index b2e5625..de57080 100644 --- a/ed25519/amd64-64-24k/fe25519_add.s +++ b/ed25519/amd64-64-24k/fe25519_add.S @@ -65,13 +65,13 @@ # qhasm: stack64 caller7_stack -# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_add +# qhasm: enter CRYPTO_NAMESPACE(fe25519_add) .text .p2align 5 -.globl _crypto_sign_ed25519_amd64_64_fe25519_add -.globl crypto_sign_ed25519_amd64_64_fe25519_add -_crypto_sign_ed25519_amd64_64_fe25519_add: -crypto_sign_ed25519_amd64_64_fe25519_add: +.globl _CRYPTO_NAMESPACE(fe25519_add) +.globl CRYPTO_NAMESPACE(fe25519_add) +_CRYPTO_NAMESPACE(fe25519_add): +CRYPTO_NAMESPACE(fe25519_add): mov %rsp,%r11 and $31,%r11 add $0,%r11 diff --git a/ed25519/amd64-64-24k/fe25519_freeze.s b/ed25519/amd64-64-24k/fe25519_freeze.S similarity index 96% rename from ed25519/amd64-64-24k/fe25519_freeze.s rename to 
ed25519/amd64-64-24k/fe25519_freeze.S index dea2902..f0f15be 100644 --- a/ed25519/amd64-64-24k/fe25519_freeze.s +++ b/ed25519/amd64-64-24k/fe25519_freeze.S @@ -63,13 +63,13 @@ # qhasm: stack64 caller7_stack -# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_freeze +# qhasm: enter CRYPTO_NAMESPACE(fe25519_freeze) .text .p2align 5 -.globl _crypto_sign_ed25519_amd64_64_fe25519_freeze -.globl crypto_sign_ed25519_amd64_64_fe25519_freeze -_crypto_sign_ed25519_amd64_64_fe25519_freeze: -crypto_sign_ed25519_amd64_64_fe25519_freeze: +.globl _CRYPTO_NAMESPACE(fe25519_freeze) +.globl CRYPTO_NAMESPACE(fe25519_freeze) +_CRYPTO_NAMESPACE(fe25519_freeze): +CRYPTO_NAMESPACE(fe25519_freeze): mov %rsp,%r11 and $31,%r11 add $64,%r11 diff --git a/ed25519/amd64-64-24k/fe25519_mul.s b/ed25519/amd64-64-24k/fe25519_mul.S similarity index 96% rename from ed25519/amd64-64-24k/fe25519_mul.s rename to ed25519/amd64-64-24k/fe25519_mul.S index 7e24518..5c67c9f 100644 --- a/ed25519/amd64-64-24k/fe25519_mul.s +++ b/ed25519/amd64-64-24k/fe25519_mul.S @@ -89,13 +89,13 @@ # qhasm: int64 muli38 -# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_mul +# qhasm: enter CRYPTO_NAMESPACE(fe25519_mul) .text .p2align 5 -.globl _crypto_sign_ed25519_amd64_64_fe25519_mul -.globl crypto_sign_ed25519_amd64_64_fe25519_mul -_crypto_sign_ed25519_amd64_64_fe25519_mul: -crypto_sign_ed25519_amd64_64_fe25519_mul: +.globl _CRYPTO_NAMESPACE(fe25519_mul) +.globl CRYPTO_NAMESPACE(fe25519_mul) +_CRYPTO_NAMESPACE(fe25519_mul): +CRYPTO_NAMESPACE(fe25519_mul): mov %rsp,%r11 and $31,%r11 add $64,%r11 @@ -651,8 +651,8 @@ adc %rdx,%r11 # asm 2: mov mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -669,8 +669,8 @@ mov %r9,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add squarerax=%rax mov %r11,%rax -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: squarer4 = squarerax # asm 1: mov squarer4=int64#2 @@ -443,8 +443,8 @@ mov %r12,%rax # asm 2: mov squarer5=%r11 mov %rdx,%r11 -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? 
squarer5 += squarerax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -1225,8 +1225,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -1907,8 +1907,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -2759,8 +2759,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? 
mulr5 += mulrax # asm 1: add mulx0=%r12 movq 56(%rsp),%r12 -# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 -# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D0,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0) +# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 # asm 1: mul c1=%r14 mov %rdx,%r14 -# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D1 -# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D1,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1) +# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D2,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2) +# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D3,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3) +# asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 # asm 1: mul mulx1=%r12 movq 64(%rsp),%r12 -# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 -# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D0,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0) +# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D1,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1) +# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D2,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2) +# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D3,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3) +# asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = 
mulrax * mulx1 # asm 1: mul mulx2=%r12 movq 72(%rsp),%r12 -# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 -# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D0,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0) +# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D1,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1) +# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D2,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2) +# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D3,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3) +# asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 # asm 1: mul mulx3=%r12 movq 80(%rsp),%r12 -# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_EC2D0 -# asm 1: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D0,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D0,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D0) +# asm 1: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D0),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D0)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D1,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D1,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D1) +# asm 1: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D1),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D1)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D2,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D2,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D2) +# asm 1: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D2),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D2)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 # asm 1: mul mulrax=int64#7 -# asm 2: movq crypto_sign_ed25519_amd64_64_EC2D3,>mulrax=%rax -movq crypto_sign_ed25519_amd64_64_EC2D3,%rax +# qhasm: mulrax = *(uint64 *)&CRYPTO_NAMESPACE(EC2D3) +# asm 1: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=int64#7 +# asm 2: movq CRYPTO_NAMESPACE(EC2D3),>mulrax=%rax +movq CRYPTO_NAMESPACE(EC2D3)(%rip),%rax # qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 # asm 1: mul mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 
*)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -3441,8 +3441,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -4123,8 +4123,8 @@ mov %r9,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add squarerax=%rax mov %r11,%rax -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: squarer4 = squarerax # asm 1: mov squarer4=int64#9 @@ -593,8 +593,8 @@ mov %r12,%rax # asm 2: mov squarer5=%r12 mov %rdx,%r12 -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? squarer5 += squarerax # asm 1: add squarerax=%rax mov %r11,%rax -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: squarer4 = squarerax # asm 1: mov squarer4=int64#9 @@ -1060,8 +1060,8 @@ mov %r12,%rax # asm 2: mov squarer5=%r12 mov %rdx,%r12 -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? squarer5 += squarerax # asm 1: add squarerax=%rax mov %r11,%rax -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: squarer4 = squarerax # asm 1: mov squarer4=int64#9 @@ -1527,8 +1527,8 @@ mov %r12,%rax # asm 2: mov squarer5=%r12 mov %rdx,%r12 -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? 
squarer5 += squarerax # asm 1: add squarerax=%rax mov %r10,%rax -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: squarer4 = squarerax # asm 1: mov squarer4=int64#8 @@ -2649,8 +2649,8 @@ mov %r11,%rax # asm 2: mov squarer5=%r11 mov %rdx,%r11 -# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? squarer5 += squarerax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -1078,8 +1078,8 @@ mov %r8,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -1760,8 +1760,8 @@ mov %r8,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -2612,8 +2612,8 @@ mov %r8,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rsi,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -3549,8 +3549,8 @@ mov %rcx,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? 
mulr5 += mulrax # asm 1: add mulrax=%rax mov %rsi,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -4231,8 +4231,8 @@ mov %rcx,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rsi,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -4913,8 +4913,8 @@ mov %rcx,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rsi,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -5595,8 +5595,8 @@ mov %rcx,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -1087,8 +1087,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -1769,8 +1769,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? 
mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -2621,8 +2621,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -676,8 +676,8 @@ mov %r8,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -1358,8 +1358,8 @@ mov %r8,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -2040,8 +2040,8 @@ mov %r8,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -684,8 +684,8 @@ mov %r8,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? 
mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -1366,8 +1366,8 @@ mov %r8,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#4 @@ -2048,8 +2048,8 @@ mov %r8,%rax # asm 2: mov mulr5=%r8 mov %rdx,%r8 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %rcx,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -2730,8 +2730,8 @@ mov %r8,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -1015,8 +1015,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -1697,8 +1697,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? 
mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#5 @@ -2549,8 +2549,8 @@ mov %r9,%rax # asm 2: mov mulr5=%r9 mov %rdx,%r9 -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add mulrax=%rax mov %r8,%rax -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: mulr4 = mulrax # asm 1: mov mulr4=int64#2 @@ -3231,8 +3231,8 @@ mov %r9,%rax # asm 2: mov mulr5=%rcx mov %rdx,%rcx -# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38 -mulq crypto_sign_ed25519_amd64_64_38 +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&CRYPTO_NAMESPACE(38) +mulq CRYPTO_NAMESPACE(38)(%rip) # qhasm: carry? mulr5 += mulrax # asm 1: add t3=%r14 mov %rsi,%r14 -# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 -# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,rax=%rax movq 24(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU3) +mulq CRYPTO_NAMESPACE(MU3)(%rip) # qhasm: q23 = rax # asm 1: mov q23=int64#10 @@ -202,8 +202,8 @@ mov %rdx,%r13 # asm 2: movq 24(rax=%rax movq 24(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU4) +mulq CRYPTO_NAMESPACE(MU4)(%rip) # qhasm: q24 = rax # asm 1: mov q24=int64#12 @@ -225,8 +225,8 @@ adc %rdx,%r8 # asm 2: movq 32(rax=%rax movq 32(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU2) +mulq CRYPTO_NAMESPACE(MU2)(%rip) # qhasm: carry? q23 += rax # asm 1: add rax=%rax movq 32(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU3) +mulq CRYPTO_NAMESPACE(MU3)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 32(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU4) +mulq CRYPTO_NAMESPACE(MU4)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU1) +mulq CRYPTO_NAMESPACE(MU1)(%rip) # qhasm: carry? 
q23 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU2) +mulq CRYPTO_NAMESPACE(MU2)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU3) +mulq CRYPTO_NAMESPACE(MU3)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 40(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU4) +mulq CRYPTO_NAMESPACE(MU4)(%rip) # qhasm: carry? q31 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU0) +mulq CRYPTO_NAMESPACE(MU0)(%rip) # qhasm: carry? q23 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU1) +mulq CRYPTO_NAMESPACE(MU1)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU2) +mulq CRYPTO_NAMESPACE(MU2)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU3) +mulq CRYPTO_NAMESPACE(MU3)(%rip) # qhasm: carry? q31 += rax # asm 1: add rax=%rax movq 48(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU4) +mulq CRYPTO_NAMESPACE(MU4)(%rip) # qhasm: carry? q32 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU0) +mulq CRYPTO_NAMESPACE(MU0)(%rip) # qhasm: carry? q24 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU1) +mulq CRYPTO_NAMESPACE(MU1)(%rip) # qhasm: carry? q30 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU2) +mulq CRYPTO_NAMESPACE(MU2)(%rip) # qhasm: carry? 
q31 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU3) +mulq CRYPTO_NAMESPACE(MU3)(%rip) # qhasm: carry? q32 += rax # asm 1: add rax=%rax movq 56(%rsi),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 -mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(MU4) +mulq CRYPTO_NAMESPACE(MU4)(%rip) # qhasm: carry? q33 += rax # asm 1: add rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER0) +mulq CRYPTO_NAMESPACE(ORDER0)(%rip) # qhasm: r20 = rax # asm 1: mov r20=int64#5 @@ -761,8 +761,8 @@ mov %rdx,%r9 # asm 2: movq rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER1) +mulq CRYPTO_NAMESPACE(ORDER1)(%rip) # qhasm: r21 = rax # asm 1: mov r21=int64#8 @@ -789,8 +789,8 @@ adc %rdx,%r9 # asm 2: movq rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER2) +mulq CRYPTO_NAMESPACE(ORDER2)(%rip) # qhasm: r22 = rax # asm 1: mov r22=int64#9 @@ -817,8 +817,8 @@ adc %rdx,%r9 # asm 2: movq rax=%rax movq 56(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER3) +mulq CRYPTO_NAMESPACE(ORDER3)(%rip) # qhasm: free rdx @@ -837,8 +837,8 @@ add %r9,%r12 # asm 2: movq rax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER0) +mulq CRYPTO_NAMESPACE(ORDER0)(%rip) # qhasm: carry? r21 += rax # asm 1: add rax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER1) +mulq CRYPTO_NAMESPACE(ORDER1)(%rip) # qhasm: carry? r22 += rax # asm 1: add rax=%rax movq 64(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER2) +mulq CRYPTO_NAMESPACE(ORDER2)(%rip) # qhasm: free rdx @@ -913,8 +913,8 @@ add %rcx,%r12 # asm 2: movq rax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER0) +mulq CRYPTO_NAMESPACE(ORDER0)(%rip) # qhasm: carry? 
r22 += rax # asm 1: add rax=%rax movq 72(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER1) +mulq CRYPTO_NAMESPACE(ORDER1)(%rip) # qhasm: free rdx @@ -956,8 +956,8 @@ add %rcx,%r12 # asm 2: movq rax=%rax movq 80(%rsp),%rax -# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 -mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_NAMESPACE(ORDER0) +mulq CRYPTO_NAMESPACE(ORDER0)(%rip) # qhasm: free rdx @@ -1026,25 +1026,25 @@ sbb %r12,%rsi # asm 2: mov t3=%r11 mov %rsi,%r11 -# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 -# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,t3=%r11 mov %rsi,%r11 -# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 -# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0, -#include -#include "ed25519-donna/ed25519-donna.h" - -static int ed25519_seckey_expand(unsigned char *sk,const unsigned char *seed) -{ - crypto_hash_sha512(sk,seed,32); - sk[0] &= 248; - sk[31] &= 127; - sk[31] |= 64; - - return 0; -} - -static int ed25519_seckey(unsigned char *sk) -{ - unsigned char seed[32]; - - randombytes(seed,32); - return ed25519_seckey_expand(sk,seed); -} - -static int ed25519_pubkey(unsigned char *pk,const unsigned char *sk) -{ - bignum256modm a; - ge25519 ALIGN(16) A; - - expand256_modm(a,sk,32); - ge25519_scalarmult_base_niels(&A,ge25519_niels_base_multiples,a); - ge25519_pack(pk,&A); - - return 0; -} - - -static int ed25519_keypair(unsigned char *pk,unsigned char *sk) -{ - ed25519_seckey(sk); - ed25519_pubkey(pk,sk); - - return 0; -} - -#define fe bignum25519 -#define ge_p1p1 ge25519_p1p1 -#define ge_p3 ge25519 - -#define ge_p1p1_to_p3 ge25519_p1p1_to_full -#define ge_p3_tobytes ge25519_pack - -#define ge_p3_batchtobytes_destructive_1 ge25519_batchpack_destructive_1 -#define ge_p3_batchtobytes_destructive_finish ge25519_batchpack_destructive_finish - -DONNA_INLINE static void ge_add(ge25519_p1p1 *r,const ge25519 *p,const ge25519_pniels *q) -{ - ge25519_pnielsadd_p1p1(r,p,q,0); -} - -DONNA_INLINE static void ge_scalarmult_base(ge25519 *A,const unsigned char *sk) -{ - bignum256modm ALIGN(16) a; - expand256_modm(a,sk,32); - ge25519_scalarmult_base_niels(A,ge25519_niels_base_multiples,a); -} - -static ge25519_pniels ALIGN(16) ge_eightpoint; -// portable representation of (basepoint * 8) -static u8 fe_ysubx[32] = { - 0xE0,0xC3,0x64,0xC7,0xDC,0xAD,0x36,0x5E, - 0x25,0xAA,0x86,0xC8,0xC7,0x85,0x5F,0x07, - 0x67,0x65,0x1C,0x3D,0x99,0xDD,0x26,0x55, - 0x9C,0xB5,0x71,0x1E,0x1D,0xC4,0xC8,0x71, -}; -static u8 fe_xaddy[32] = { - 0x9C,0xFD,0xE3,0xC2,0x2A,0x15,0x34,0x1B, - 0x3B,0xE7,0x62,0xAB,0x56,0xFA,0xDF,0xE7, - 0xCF,0xBE,0xB5,0x8D,0x83,0x8A,0x1D,0xA5, - 0xAD,0x3E,0x42,0x42,0xC9,0x4F,0x1B,0x09, -}; -static u8 fe_z[32] = { - 0x77,0xAA,0x7F,0x85,0x02,0x8E,0xF5,0xD9, - 0x52,0xFE,0x8F,0xE6,0x8A,0x52,0x21,0x4A, - 0xCB,0x8D,0x1C,0x05,0x7D,0xAD,0x4A,0x1B, - 0xC6,0x7B,0x23,0x9D,0x4C,0x3F,0xD6,0x02, -}; -static u8 fe_t2d[32] = { - 0x4E,0x06,0xF4,0xFB,0x04,0x0B,0xCE,0x86, - 0x6B,0x52,0xBB,0x96,0x0A,0xCE,0x11,0x3C, - 0xCD,0xEF,0x4A,0x46,0x68,0x47,0xAA,0x72, - 0x5F,0x65,0x90,0x91,0xA8,0x38,0xCA,0x37, -}; - -// initialize from packed representation -static void ge_initeightpoint(void) -{ - memset(&ge_eightpoint,0,sizeof(ge_eightpoint)); - 
curve25519_expand(ge_eightpoint.ysubx,fe_ysubx); - curve25519_expand(ge_eightpoint.xaddy,fe_xaddy); - curve25519_expand(ge_eightpoint.z,fe_z); - curve25519_expand(ge_eightpoint.t2d,fe_t2d); -} - -#endif diff --git a/ed25519/ed25519_impl_post.h b/ed25519/ed25519_impl_post.h new file mode 100644 index 0000000..0f5a3e8 --- /dev/null +++ b/ed25519/ed25519_impl_post.h @@ -0,0 +1,85 @@ + +#undef ed25519_seckey +#undef ed25519_seckey_expand +#undef ed25519_pubkey +#undef ed25519_keygen + +#undef ge_eightpoint +#undef ge_initeightpoint + +#undef ge_add +#undef ge_p3_batchtobytes_destructive_1 +#undef ge_p3_batchtobytes_destructive_finish +#undef ge_scalarmult_base + + +#ifdef ED25519_ref10 + +#undef ge_frombytes_negate_vartime +#undef ge_tobytes +#undef ge_p3_tobytes +#undef ge_p2_0 +#undef ge_p3_0 +#undef ge_precomp_0 +#undef ge_p3_to_p2 +#undef ge_p3_to_cached +#undef ge_p1p1_to_p2 +#undef ge_p1p1_to_p3 +#undef ge_p2_dbl +#undef ge_p3_dbl +#undef ge_madd +#undef ge_msub +#undef ge_sub +#undef ge_scalarmult_base +#undef ge_double_scalarmult_vartime + +#endif + + +#if defined(ED25519_amd64_51_30k) || defined(ED25519_amd64_64_24k) + +#undef ge25519 +#undef ge25519_base +#undef ge25519_unpackneg_vartime +#undef ge25519_pack +#undef ge25519_isneutral_vartime +#undef ge25519_add +#undef ge25519_double +#undef ge25519_double_scalarmult_vartime +#undef ge25519_multi_scalarmult_vartime +#undef ge25519_scalarmult_base +#undef ge25519_p1p1_to_p2 +#undef ge25519_p1p1_to_p3 +#undef ge25519_p1p1_to_pniels +#undef ge25519_add_p1p1 +#undef ge25519_dbl_p1p1 +#undef choose_t +#undef choose_t_smultq +#undef ge25519_nielsadd2 +#undef ge25519_nielsadd_p1p1 +#undef ge25519_pnielsadd_p1p1 +#undef ge25519_p3 + +#undef fe +#undef ge_p1p1 +#undef ge_p3 +#undef ge_p1p1_to_p3 +#undef ge_p3_tobytes + +#endif + + +#ifdef ED25519_donna + +#undef fe_ysubx +#undef fe_xaddy +#undef fe_z +#undef fe_t2d + +#undef fe +#undef ge_p1p1 +#undef ge_p3 +#undef ge_p1p1_to_p3 +#undef ge_p3_tobytes + +#endif diff --git a/ed25519/ed25519_impl_pre.h b/ed25519/ed25519_impl_pre.h new file mode 100644 index 0000000..375c9b0 --- /dev/null +++ b/ed25519/ed25519_impl_pre.h @@ -0,0 +1,255 @@ +#ifndef ED25519_donna +# if defined(_MSC_VER) +# define ALIGN(x) __declspec(align(x)) +# elif defined(__GNUC__) +# undef ALIGN +# define ALIGN(x) __attribute__((aligned(x))) +# else +# ifndef ALIGN +# define ALIGN(x) +# endif +# endif +#endif + + +#define ed25519_seckey CRYPTO_NAMESPACE(seckey) +#define ed25519_seckey_expand CRYPTO_NAMESPACE(seckey_expand) +#define ed25519_pubkey CRYPTO_NAMESPACE(pubkey) +#define ed25519_keygen CRYPTO_NAMESPACE(keygen) + +#define ge_eightpoint CRYPTO_NAMESPACE(ge_eightpoint) +#define ge_initeightpoint CRYPTO_NAMESPACE(ge_initeightpoint) + + +#ifdef ED25519_ref10 + +#include "ref10/crypto_sign.h" +#include "ref10/ge.h" + +/* The basepoint multiplied by 8. 
+static const ge_cached ge_eightpoint = {
+	/* YplusX */
+	{
+		48496028, -16430416, 15164263, 11885335, 60784617, -4866353, 46481863,
+		-2771805, 9708580, 2387263
+	},
+	/* YminusX */
+	{
+		-10173472, -5540046, 21277639, 4080693, 1932823, -14916249, -9515873,
+		-21787995, -36575460, 29827857
+	},
+	/* Z */
+	{
+		25143927, -10256223, -3515585, 5715072, 19432778, -14905909, 22462083,
+		-8862871, 13226552, 743677
+	},
+	/* T2d */
+	{
+		-784818, -8208065, -28479270, 5551579, 15746872, 4911053, 19117091,
+		11267669, -24569594, 14624995
+	}
+};
+inline static void ge_initeightpoint(void) {}
+
+#endif
+
+
+#ifdef ED25519_amd64_51_30k
+
+#include "amd64-51-30k/crypto_sign.h"
+#include "amd64-51-30k/ge25519.h"
+
+#endif
+
+
+#ifdef ED25519_amd64_64_24k
+
+#include "amd64-64-24k/crypto_sign.h"
+#include "amd64-64-24k/ge25519.h"
+
+#endif
+
+
+// common
+#if defined(ED25519_amd64_51_30k) || defined(ED25519_amd64_64_24k)
+
+#define fe fe25519
+#define ge_p1p1 ge25519_p1p1
+#define ge_p3 ge25519_p3
+#define ge_p1p1_to_p3 ge25519_p1p1_to_p3
+#define ge_p3_tobytes ge25519_pack
+#define ge_add ge25519_pnielsadd_p1p1
+
+#define ge_p3_batchtobytes_destructive_1 ge25519_batchpack_destructive_1
+#define ge_p3_batchtobytes_destructive_finish ge25519_batchpack_destructive_finish
+
+#define ge_scalarmult_base CRYPTO_NAMESPACE(ge_scalarmult_base)
+
+#endif
+
+
+#ifdef ED25519_amd64_51_30k
+static inline void ge_scalarmult_base(ge_p3 *gepk,const unsigned char *sk)
+{
+	sc25519 scsk;
+
+	sc25519_from32bytes(&scsk,sk);
+	ge25519_scalarmult_base(gepk,&scsk);
+}
+
+/* The basepoint multiplied by 8. */
+static const ge25519_pniels ge_eightpoint = {
+	// ysubx
+	{{ 1880013609944032, 273850692840390, 1250787290086935, 789632210881694, 2001713562248987 }},
+	// xaddy
+	{{ 1149173309373852, 797611345273702, 1925224452816873, 2065787175387590, 160206517707811 }},
+	// z
+	{{ 1563516364368503, 383531986082622, 1251481213240650, 1657022631558786, 49907331879479 }},
+	// t2d
+	{{ 1700965895112270, 372560131616985, 329575203620664, 756160485635107, 981466775886086 }},
+};
+inline static void ge_initeightpoint(void) {}
+#endif
+
+
+#ifdef ED25519_amd64_64_24k
+static inline void ge_scalarmult_base(ge_p3 *gepk,const unsigned char *sk)
+{
+	sc25519 scsk;
+
+	sc25519_from32bytes(&scsk,sk);
+	ge25519_scalarmult_base(gepk,&scsk);
+}
+
+/* The basepoint multiplied by 8. */
+static const ge25519_pniels ge_eightpoint = {
+	// ysubx
+	{{ 6788804652057281504U, 531290374162262565U, 6135835192563885415U, 8199018750971852188U }},
+	// xaddy
+	{{ 1960215011215539612U, 16708348392717346619U, 11897818088205565647U, 656205896531197613U }},
+	// z
+	{{ 15705615417005288055U, 5341641389565279826U, 1966574939768917451U, 204420431378348998U }},
+	// t2d
+	{{ 9713713562319586894U, 4328467261753610859U, 8262494979546083277U, 4020087914029409631U }},
+};
+inline static void ge_initeightpoint(void) {}
+#endif
+
+
+#ifdef ED25519_donna
+
+#define ED25519_CUSTOMRANDOM
+#define ED25519_CUSTOMHASH
+#include
+#include
+#include "ed25519-donna/ed25519-donna.h"
+
+static int ed25519_seckey_expand(unsigned char *sk,const unsigned char *seed)
+{
+	crypto_hash_sha512(sk,seed,32);
+	sk[0] &= 248;
+	sk[31] &= 127;
+	sk[31] |= 64;
+
+	return 0;
+}
+
+static int ed25519_seckey(unsigned char *sk)
+{
+	unsigned char seed[32];
+
+	randombytes(seed,32);
+	return ed25519_seckey_expand(sk,seed);
+}
+
+static int ed25519_pubkey(unsigned char *pk,const unsigned char *sk)
+{
+	bignum256modm a;
+	ge25519 ALIGN(16) A;
+
+	expand256_modm(a,sk,32);
+	ge25519_scalarmult_base_niels(&A,ge25519_niels_base_multiples,a);
+	ge25519_pack(pk,&A);
+
+	return 0;
+}
+
+
+static int ed25519_keypair(unsigned char *pk,unsigned char *sk)
+{
+	ed25519_seckey(sk);
+	ed25519_pubkey(pk,sk);
+
+	return 0;
+}
+
+#define fe bignum25519
+#define ge_p1p1 ge25519_p1p1
+#define ge_p3 ge25519
+
+#define ge_p1p1_to_p3 ge25519_p1p1_to_full
+#define ge_p3_tobytes ge25519_pack
+
+#define ge_p3_batchtobytes_destructive_1 ge25519_batchpack_destructive_1
+#define ge_p3_batchtobytes_destructive_finish ge25519_batchpack_destructive_finish
+
+
+#define ge_add CRYPTO_NAMESPACE(ge_add)
+#define ge_scalarmult_base CRYPTO_NAMESPACE(ge_scalarmult_base)
+
+
+DONNA_INLINE static void ge_add(ge25519_p1p1 *r,const ge25519 *p,const ge25519_pniels *q)
+{
+	ge25519_pnielsadd_p1p1(r,p,q,0);
+}
+
+DONNA_INLINE static void ge_scalarmult_base(ge25519 *A,const unsigned char *sk)
+{
+	bignum256modm ALIGN(16) a;
+	expand256_modm(a,sk,32);
+	ge25519_scalarmult_base_niels(A,ge25519_niels_base_multiples,a);
+}
+
+#define fe_ysubx CRYPTO_NAMESPACE(fe_ysubx)
+#define fe_xaddy CRYPTO_NAMESPACE(fe_xaddy)
+#define fe_z CRYPTO_NAMESPACE(fe_z)
+#define fe_t2d CRYPTO_NAMESPACE(fe_t2d)
+
+static ge25519_pniels ALIGN(16) ge_eightpoint;
+// portable representation of (basepoint * 8)
+static u8 fe_ysubx[32] = {
+	0xE0,0xC3,0x64,0xC7,0xDC,0xAD,0x36,0x5E,
+	0x25,0xAA,0x86,0xC8,0xC7,0x85,0x5F,0x07,
+	0x67,0x65,0x1C,0x3D,0x99,0xDD,0x26,0x55,
+	0x9C,0xB5,0x71,0x1E,0x1D,0xC4,0xC8,0x71,
+};
+static u8 fe_xaddy[32] = {
+	0x9C,0xFD,0xE3,0xC2,0x2A,0x15,0x34,0x1B,
+	0x3B,0xE7,0x62,0xAB,0x56,0xFA,0xDF,0xE7,
+	0xCF,0xBE,0xB5,0x8D,0x83,0x8A,0x1D,0xA5,
+	0xAD,0x3E,0x42,0x42,0xC9,0x4F,0x1B,0x09,
+};
+static u8 fe_z[32] = {
+	0x77,0xAA,0x7F,0x85,0x02,0x8E,0xF5,0xD9,
+	0x52,0xFE,0x8F,0xE6,0x8A,0x52,0x21,0x4A,
+	0xCB,0x8D,0x1C,0x05,0x7D,0xAD,0x4A,0x1B,
+	0xC6,0x7B,0x23,0x9D,0x4C,0x3F,0xD6,0x02,
+};
+static u8 fe_t2d[32] = {
+	0x4E,0x06,0xF4,0xFB,0x04,0x0B,0xCE,0x86,
+	0x6B,0x52,0xBB,0x96,0x0A,0xCE,0x11,0x3C,
+	0xCD,0xEF,0x4A,0x46,0x68,0x47,0xAA,0x72,
+	0x5F,0x65,0x90,0x91,0xA8,0x38,0xCA,0x37,
+};
+
+// initialize from packed representation
+static void ge_initeightpoint(void)
+{
+	memset(&ge_eightpoint,0,sizeof(ge_eightpoint));
+	curve25519_expand(ge_eightpoint.ysubx,fe_ysubx);
+	curve25519_expand(ge_eightpoint.xaddy,fe_xaddy);
+	curve25519_expand(ge_eightpoint.z,fe_z);
+	curve25519_expand(ge_eightpoint.t2d,fe_t2d);
+}
+
+#endif
diff --git a/ed25519/ref10/crypto_sign.h b/ed25519/ref10/crypto_sign.h
index 71b2543..35b0a74 100644
--- a/ed25519/ref10/crypto_sign.h
+++ b/ed25519/ref10/crypto_sign.h
@@ -1,8 +1,9 @@
-#define crypto_sign ed25519_ref10_sign
-#define crypto_sign_keypair ed25519_ref10_keygen
-#define crypto_sign_seckey ed25519_ref10_seckey
-#define crypto_sign_seckey_expand ed25519_ref10_seckey_expand
-#define crypto_sign_pubkey ed25519_ref10_pubkey
-#define crypto_sign_open ed25519_ref10_open
+#define crypto_sign CRYPTO_NAMESPACE(sign)
+#define crypto_sign_keypair CRYPTO_NAMESPACE(keygen)
+#define crypto_sign_seckey CRYPTO_NAMESPACE(seckey)
+#define crypto_sign_seckey_expand CRYPTO_NAMESPACE(seckey_expand)
+#define crypto_sign_pubkey CRYPTO_NAMESPACE(pubkey)
+#define crypto_sign_open CRYPTO_NAMESPACE(open)
+#define crypto_sign_open_batch CRYPTO_NAMESPACE(open_batch)
 
 #include "ed25519.h"
diff --git a/ed25519/ref10/ed25519.h b/ed25519/ref10/ed25519.h
index 406aafc..2122ad5 100644
--- a/ed25519/ref10/ed25519.h
+++ b/ed25519/ref10/ed25519.h
@@ -1,13 +1,13 @@
-int ed25519_ref10_seckey(unsigned char *sk);
-int ed25519_ref10_seckey_expand(unsigned char *sk,const unsigned char *seed);
-int ed25519_ref10_pubkey(unsigned char *pk,const unsigned char *sk);
-int ed25519_ref10_keygen(unsigned char *pk,unsigned char *sk);
-int ed25519_ref10_sign(
+int crypto_sign_seckey(unsigned char *sk);
+int crypto_sign_seckey_expand(unsigned char *sk,const unsigned char *seed);
+int crypto_sign_pubkey(unsigned char *pk,const unsigned char *sk);
+int crypto_sign_keypair(unsigned char *pk,unsigned char *sk);
+int crypto_sign(
   unsigned char *sm,unsigned long long *smlen,
   const unsigned char *m,unsigned long long mlen,
   const unsigned char *sk
 );
-int ed25519_ref10_open(
+int crypto_sign_open(
   unsigned char *m,unsigned long long *mlen,
   const unsigned char *sm,unsigned long long smlen,
   const unsigned char *pk
diff --git a/ed25519/ref10/fe.h b/ed25519/ref10/fe.h
index 11aeebb..f4ec2bf 100644
--- a/ed25519/ref10/fe.h
+++ b/ed25519/ref10/fe.h
@@ -14,25 +14,25 @@ t[0]+2^26 t[1]+2^51 t[2]+2^77 t[3]+2^102 t[4]+...+2^230 t[9].
 Bounds on each t[i] vary depending on context.
 */
 
-#define fe_frombytes crypto_sign_ed25519_ref10_fe_frombytes
-#define fe_tobytes crypto_sign_ed25519_ref10_fe_tobytes
-#define fe_copy crypto_sign_ed25519_ref10_fe_copy
-#define fe_isnonzero crypto_sign_ed25519_ref10_fe_isnonzero
-#define fe_isnegative crypto_sign_ed25519_ref10_fe_isnegative
-#define fe_0 crypto_sign_ed25519_ref10_fe_0
-#define fe_1 crypto_sign_ed25519_ref10_fe_1
-#define fe_cswap crypto_sign_ed25519_ref10_fe_cswap
-#define fe_cmov crypto_sign_ed25519_ref10_fe_cmov
-#define fe_add crypto_sign_ed25519_ref10_fe_add
-#define fe_sub crypto_sign_ed25519_ref10_fe_sub
-#define fe_neg crypto_sign_ed25519_ref10_fe_neg
-#define fe_mul crypto_sign_ed25519_ref10_fe_mul
-#define fe_sq crypto_sign_ed25519_ref10_fe_sq
-#define fe_sq2 crypto_sign_ed25519_ref10_fe_sq2
-#define fe_mul121666 crypto_sign_ed25519_ref10_fe_mul121666
-#define fe_invert crypto_sign_ed25519_ref10_fe_invert
-#define fe_batchinvert crypto_sign_ed25519_ref10_fe_batchinvert
-#define fe_pow22523 crypto_sign_ed25519_ref10_fe_pow22523
+#define fe_frombytes CRYPTO_NAMESPACE(fe_frombytes)
+#define fe_tobytes CRYPTO_NAMESPACE(fe_tobytes)
+#define fe_copy CRYPTO_NAMESPACE(fe_copy)
+#define fe_isnonzero CRYPTO_NAMESPACE(fe_isnonzero)
+#define fe_isnegative CRYPTO_NAMESPACE(fe_isnegative)
+#define fe_0 CRYPTO_NAMESPACE(fe_0)
+#define fe_1 CRYPTO_NAMESPACE(fe_1)
+#define fe_cswap CRYPTO_NAMESPACE(fe_cswap)
+#define fe_cmov CRYPTO_NAMESPACE(fe_cmov)
+#define fe_add CRYPTO_NAMESPACE(fe_add)
+#define fe_sub CRYPTO_NAMESPACE(fe_sub)
+#define fe_neg CRYPTO_NAMESPACE(fe_neg)
+#define fe_mul CRYPTO_NAMESPACE(fe_mul)
+#define fe_sq CRYPTO_NAMESPACE(fe_sq)
+#define fe_sq2 CRYPTO_NAMESPACE(fe_sq2)
+#define fe_mul121666 CRYPTO_NAMESPACE(fe_mul121666)
+#define fe_invert CRYPTO_NAMESPACE(fe_invert)
+#define fe_batchinvert CRYPTO_NAMESPACE(fe_batchinvert)
+#define fe_pow22523 CRYPTO_NAMESPACE(fe_pow22523)
 
 extern void fe_frombytes(fe,const unsigned char *);
 extern void fe_tobytes(unsigned char *,const fe);
diff --git a/ed25519/ref10/fe_isnonzero.c b/ed25519/ref10/fe_isnonzero.c
index 0261e92..4756800 100644
--- a/ed25519/ref10/fe_isnonzero.c
+++ b/ed25519/ref10/fe_isnonzero.c
@@ -9,7 +9,7 @@ Preconditions:
 |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
 */
 
-static const unsigned char zero[32] = {0};
+static const unsigned char zero[32];
 
 int fe_isnonzero(const fe f)
 {
diff --git a/ed25519/ref10/ge.h b/ed25519/ref10/ge.h
index 3ae05b6..c69a5b3 100644
--- a/ed25519/ref10/ge.h
+++ b/ed25519/ref10/ge.h
@@ -52,28 +52,28 @@ typedef struct {
 
 typedef unsigned char bytes32[32];
 
-#define ge_frombytes_negate_vartime crypto_sign_ed25519_ref10_ge_frombytes_negate_vartime
-#define ge_tobytes crypto_sign_ed25519_ref10_ge_tobytes
-#define ge_p3_tobytes crypto_sign_ed25519_ref10_ge_p3_tobytes
-#define ge_p3_batchtobytes_destructive_1 crypto_sign_ed25519_ref10_ge_p3_batchtobytes_destructive_1
-#define ge_p3_batchtobytes_destructive_finish crypto_sign_ed25519_ref10_ge_p3_batchtobytes_destructive_finish
+#define ge_frombytes_negate_vartime CRYPTO_NAMESPACE(ge_frombytes_negate_vartime)
+#define ge_tobytes CRYPTO_NAMESPACE(ge_tobytes)
+#define ge_p3_tobytes CRYPTO_NAMESPACE(ge_p3_tobytes)
+#define ge_p3_batchtobytes_destructive_1 CRYPTO_NAMESPACE(ge_p3_batchtobytes_destructive_1)
+#define ge_p3_batchtobytes_destructive_finish CRYPTO_NAMESPACE(ge_p3_batchtobytes_destructive_finish)
 
-#define ge_p2_0 crypto_sign_ed25519_ref10_ge_p2_0
-#define ge_p3_0 crypto_sign_ed25519_ref10_ge_p3_0
-#define ge_precomp_0 crypto_sign_ed25519_ref10_ge_precomp_0
-#define ge_p3_to_p2 crypto_sign_ed25519_ref10_ge_p3_to_p2
-#define ge_p3_to_cached crypto_sign_ed25519_ref10_ge_p3_to_cached
-#define ge_p1p1_to_p2 crypto_sign_ed25519_ref10_ge_p1p1_to_p2
-#define ge_p1p1_to_p3 crypto_sign_ed25519_ref10_ge_p1p1_to_p3
-#define ge_p2_dbl crypto_sign_ed25519_ref10_ge_p2_dbl
-#define ge_p3_dbl crypto_sign_ed25519_ref10_ge_p3_dbl
+#define ge_p2_0 CRYPTO_NAMESPACE(ge_p2_0)
+#define ge_p3_0 CRYPTO_NAMESPACE(ge_p3_0)
+#define ge_precomp_0 CRYPTO_NAMESPACE(ge_precomp_0)
+#define ge_p3_to_p2 CRYPTO_NAMESPACE(ge_p3_to_p2)
+#define ge_p3_to_cached CRYPTO_NAMESPACE(ge_p3_to_cached)
+#define ge_p1p1_to_p2 CRYPTO_NAMESPACE(ge_p1p1_to_p2)
+#define ge_p1p1_to_p3 CRYPTO_NAMESPACE(ge_p1p1_to_p3)
+#define ge_p2_dbl CRYPTO_NAMESPACE(ge_p2_dbl)
+#define ge_p3_dbl CRYPTO_NAMESPACE(ge_p3_dbl)
 
-#define ge_madd crypto_sign_ed25519_ref10_ge_madd
-#define ge_msub crypto_sign_ed25519_ref10_ge_msub
-#define ge_add crypto_sign_ed25519_ref10_ge_add
-#define ge_sub crypto_sign_ed25519_ref10_ge_sub
-#define ge_scalarmult_base crypto_sign_ed25519_ref10_ge_scalarmult_base
-#define ge_double_scalarmult_vartime crypto_sign_ed25519_ref10_ge_double_scalarmult_vartime
+#define ge_madd CRYPTO_NAMESPACE(ge_madd)
+#define ge_msub CRYPTO_NAMESPACE(ge_msub)
+#define ge_add CRYPTO_NAMESPACE(ge_add)
+#define ge_sub CRYPTO_NAMESPACE(ge_sub)
+#define ge_scalarmult_base CRYPTO_NAMESPACE(ge_scalarmult_base)
+#define ge_double_scalarmult_vartime CRYPTO_NAMESPACE(ge_double_scalarmult_vartime)
 
 extern void ge_tobytes(unsigned char *,const ge_p2 *);
 extern void ge_p3_tobytes(unsigned char *,const ge_p3 *);
diff --git a/ed25519/ref10/ge_double_scalarmult.c b/ed25519/ref10/ge_double_scalarmult.c
index f8bf4bf..a2df6e9 100644
--- a/ed25519/ref10/ge_double_scalarmult.c
+++ b/ed25519/ref10/ge_double_scalarmult.c
@@ -32,7 +32,7 @@ static void slide(signed char *r,const unsigned char *a)
 }
 
-static ge_precomp Bi[8] = {
+static const ge_precomp Bi[8] = {
 #include "base2.h"
 } ;
 
diff --git a/ed25519/ref10/ge_p3_tobytes.c b/ed25519/ref10/ge_p3_tobytes.c
index b251f60..21cb2fc 100644
--- a/ed25519/ref10/ge_p3_tobytes.c
+++ b/ed25519/ref10/ge_p3_tobytes.c
@@ -1,6 +1,6 @@
 #include "ge.h"
 
-void ge_p3_tobytes(bytes32 s,const ge_p3 *h)
+void ge_p3_tobytes(unsigned char *s,const ge_p3 *h)
 {
 	fe recip;
 	fe x;
diff --git a/ed25519/ref10/ge_scalarmult_base.c b/ed25519/ref10/ge_scalarmult_base.c
index 421e4fa..6707547 100644
--- a/ed25519/ref10/ge_scalarmult_base.c
+++ b/ed25519/ref10/ge_scalarmult_base.c
@@ -19,7 +19,7 @@ static unsigned char negative(signed char b)
 	return x;
 }
 
-static void cmov(ge_precomp *t,ge_precomp *u,unsigned char b)
+static void cmov(ge_precomp *t,const ge_precomp *u,unsigned char b)
 {
 	fe_cmov(t->yplusx,u->yplusx,b);
 	fe_cmov(t->yminusx,u->yminusx,b);
@@ -27,7 +27,7 @@ static void cmov(ge_precomp *t,ge_precomp *u,unsigned char b)
 }
 
 /* base[i][j] = (j+1)*256^i*B */
-static ge_precomp base[32][8] = {
+static const ge_precomp base[32][8] = {
 #include "base.h"
 } ;
diff --git a/ed25519/ref10/sc.h b/ed25519/ref10/sc.h
index d32ed2e..81c8594 100644
--- a/ed25519/ref10/sc.h
+++ b/ed25519/ref10/sc.h
@@ -6,8 +6,8 @@ The set of scalars is \Z/l where
 l = 2^252 + 27742317777372353535851937790883648493.
 */
 
-#define sc_reduce crypto_sign_ed25519_ref10_sc_reduce
-#define sc_muladd crypto_sign_ed25519_ref10_sc_muladd
+#define sc_reduce CRYPTO_NAMESPACE(sc_reduce)
+#define sc_muladd CRYPTO_NAMESPACE(sc_muladd)
 
 extern void sc_reduce(unsigned char *);
 extern void sc_muladd(unsigned char *,const unsigned char *,const unsigned char *,const unsigned char *);
diff --git a/test_ed25519.c b/test_ed25519.c
index b87135c..7fe26c6 100644
--- a/test_ed25519.c
+++ b/test_ed25519.c
@@ -6,6 +6,7 @@
 #include "types.h"
 #include "base16.h"
 #include "ed25519/ed25519.h"
+#include "ed25519/ed25519_impl_pre.h"
 #include "testutil.h"
 
 struct pktest {
@@ -60,3 +61,5 @@ int main(void)
 
 	return 0;
 }
+
+#include "ed25519/ed25519_impl_post.h"
diff --git a/worker.c b/worker.c
index 7f8cb03..2a3d2e9 100644
--- a/worker.c
+++ b/worker.c
@@ -18,6 +18,7 @@
 #include "base32.h"
 #include "keccak.h"
 #include "ed25519/ed25519.h"
+#include "ed25519/ed25519_impl_pre.h"
 #include "ioutil.h"
 #include "common.h"
 #include "yaml.h"
@@ -180,7 +181,6 @@ static inline void shiftpk(u8 *dst,const u8 *src,size_t sbits)
 		dst[i] = 0;
 }
 
-#include "worker_slow.inc.h"
 
 // in little-endian order, 32 bytes aka 256 bits
 
@@ -195,7 +195,6 @@ static void addsztoscalar32(u8 *dst,size_t v)
 	}
 }
 
-#include "worker_fast.inc.h"
 
 #ifdef PASSPHRASE
 
@@ -213,7 +212,6 @@ static void reseedright(u8 sk[SECRET_LEN])
 }
 #endif // PASSPHRASE
 
-#include "worker_fast_pass.inc.h"
 
 #if !defined(BATCHNUM)
@@ -225,6 +223,15 @@ size_t worker_batch_memuse(void)
 	return (sizeof(ge_p3) + sizeof(fe) + sizeof(bytes32)) * BATCHNUM;
 }
 
+#include "worker_slow.inc.h"
+
+#include "worker_fast.inc.h"
+
+#include "worker_fast_pass.inc.h"
+
 #include "worker_batch.inc.h"
 
 #include "worker_batch_pass.inc.h"
+
+// XXX this is useless here, but will end up somewhere like that when I modularize stuff
+#include "ed25519/ed25519_impl_post.h"
diff --git a/worker_impl.inc.h b/worker_impl.inc.h
new file mode 100644
index 0000000..70b786d
--- /dev/null
+++ b/worker_impl.inc.h
@@ -0,0 +1 @@
+// TODO
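
Editor's note (not part of the patch): the new ed25519_impl_pre.h / ed25519_impl_post.h pair is used by bracketing a translation unit, exactly as the test_ed25519.c and worker.c hunks above do: include ed25519/ed25519.h, then ed25519_impl_pre.h, then the code that uses the generic names, then ed25519_impl_post.h to drop the mappings again. A minimal sketch of that pattern follows; it assumes the CRYPTO_NAMESPACE() macro is supplied by the build for the chosen ED25519_* backend, and the 32-byte public / 64-byte expanded secret key sizes are the usual Ed25519 conventions rather than something this patch states; example_keypair() is a hypothetical helper, not a function from the tree.

/* sketch: generic names (ed25519_seckey, ed25519_pubkey, ge_*) are mapped onto
 * the selected backend by ed25519_impl_pre.h and unmapped again by
 * ed25519_impl_post.h, so the same generic code can be compiled against
 * different namespaced backends */
#include "ed25519/ed25519.h"
#include "ed25519/ed25519_impl_pre.h"

static void example_keypair(unsigned char pk[32],unsigned char sk[64])
{
	ed25519_seckey(sk);    /* fill sk with a freshly generated, expanded secret key */
	ed25519_pubkey(pk,sk); /* derive the matching public key */
}

#include "ed25519/ed25519_impl_post.h"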